am 025218c7: Clean up stlport usage.
* commit '025218c7e4330a4942b14f9a8f1f68bd3390261c':
Clean up stlport usage.
diff --git a/build/Android.common.mk b/build/Android.common.mk
index bfb1f9b..8209725 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -14,6 +14,9 @@
# limitations under the License.
#
+ifndef ANDROID_COMMON_MK
+ANDROID_COMMON_MK = true
+
# These can be overridden via the environment or by editing to
# enable/disable certain build configuration.
#
@@ -163,11 +166,8 @@
else
# Warn if not using GCC 4.6 for target builds when not doing a top-level or 'mma' build.
ifneq ($(ONE_SHOT_MAKEFILE),)
- ifneq ($(ART_THREAD_SAFETY_CHECK_WARNING),true)
- # Enable target GCC 4.6 with: export TARGET_GCC_VERSION_EXP=4.6
- $(info Using target GCC $(TARGET_GCC_VERSION) disables thread-safety checks.)
- ART_THREAD_SAFETY_CHECK_WARNING := true
- endif
+ # Enable target GCC 4.6 with: export TARGET_GCC_VERSION_EXP=4.6
+ $(info Using target GCC $(TARGET_GCC_VERSION) disables thread-safety checks.)
endif
endif
# We build with GCC 4.6 on the host.
@@ -219,3 +219,5 @@
ART_BUILD_HOST := true
ART_BUILD_DEBUG := true
endif
+
+endif # ANDROID_COMMON_MK
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index ee1115a..e069d88 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -17,6 +17,11 @@
LOCAL_PATH := art
TEST_COMMON_SRC_FILES := \
+ compiler/driver/compiler_driver_test.cc \
+ compiler/elf_writer_test.cc \
+ compiler/jni/jni_compiler_test.cc \
+ compiler/utils/arm/managed_register_arm_test.cc \
+ compiler/utils/x86/managed_register_x86_test.cc \
runtime/barrier_test.cc \
runtime/base/histogram_test.cc \
runtime/base/mutex_test.cc \
@@ -29,6 +34,7 @@
runtime/dex_file_test.cc \
runtime/dex_instruction_visitor_test.cc \
runtime/dex_method_iterator_test.cc \
+ runtime/entrypoints/math_entrypoints_test.cc \
runtime/exception_test.cc \
runtime/gc/accounting/space_bitmap_test.cc \
runtime/gc/heap_test.cc \
@@ -42,21 +48,15 @@
runtime/mem_map_test.cc \
runtime/mirror/dex_cache_test.cc \
runtime/mirror/object_test.cc \
- runtime/oat/utils/arm/managed_register_arm_test.cc \
- runtime/oat/utils/x86/managed_register_x86_test.cc \
runtime/oat_test.cc \
runtime/output_stream_test.cc \
runtime/reference_table_test.cc \
- runtime/runtime_support_test.cc \
runtime/runtime_test.cc \
runtime/thread_pool_test.cc \
runtime/utils_test.cc \
runtime/verifier/method_verifier_test.cc \
runtime/verifier/reg_type_test.cc \
- runtime/zip_archive_test.cc \
- compiler/driver/compiler_driver_test.cc \
- compiler/elf_writer_test.cc \
- compiler/jni/jni_compiler_test.cc
+ runtime/zip_archive_test.cc
ifeq ($(ART_SEA_IR_MODE),true)
TEST_COMMON_SRC_FILES += \
@@ -68,7 +68,7 @@
TEST_HOST_SRC_FILES := \
$(TEST_COMMON_SRC_FILES) \
- runtime/oat/utils/x86/assembler_x86_test.cc
+ compiler/utils/x86/assembler_x86_test.cc
ART_HOST_TEST_EXECUTABLES :=
ART_TARGET_TEST_EXECUTABLES :=
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 9f25022..df77853 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -76,12 +76,20 @@
llvm/runtime_support_builder_x86.cc \
stubs/portable/stubs.cc \
stubs/quick/stubs.cc \
+ utils/arm/assembler_arm.cc \
+ utils/arm/managed_register_arm.cc \
+ utils/assembler.cc \
+ utils/mips/assembler_mips.cc \
+ utils/mips/managed_register_mips.cc \
+ utils/x86/assembler_x86.cc \
+ utils/x86/managed_register_x86.cc \
elf_fixup.cc \
elf_stripper.cc \
elf_writer.cc \
elf_writer_quick.cc \
image_writer.cc \
- oat_writer.cc
+ oat_writer.cc \
+ vector_output_stream.cc
ifeq ($(ART_SEA_IR_MODE),true)
LIBART_COMPILER_SRC_FILES += \
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 64ebb6a..745e43d 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -19,7 +19,7 @@
#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
-#include "oat/runtime/oat_support_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
namespace art {
@@ -432,7 +432,7 @@
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
LoadValueDirectFixed(rl_src, r0);
- LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+ LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
rARM_LR);
// Materialize a pointer to the fill data image
NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
@@ -488,7 +488,7 @@
OpRegImm(kOpCmp, r1, 0);
OpIT(kCondNe, "T");
// Go expensive route - artLockObjectFromCode(self, obj);
- LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+ LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, rARM_LR);
MarkSafepointPC(call_inst);
@@ -519,7 +519,7 @@
OpIT(kCondEq, "EE");
StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
// Go expensive route - UnlockObjectFromCode(obj);
- LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+ LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, rARM_LR);
MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 1bb08c4..08d6778 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -49,7 +49,8 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
return;
@@ -91,7 +92,8 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
return;
@@ -140,16 +142,16 @@
op = kThumb2VcvtDI;
break;
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -315,7 +317,7 @@
branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
ClobberCalleeSave();
LockCallTemps(); // Using fixed registers
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pSqrt));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
NewLIR1(kThumbBlxR, r_tgt);
NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index e1a77da..9db1016 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -19,8 +19,8 @@
#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
-#include "oat/runtime/oat_support_entrypoints.h"
namespace art {
@@ -665,7 +665,7 @@
*/
RegLocation rl_result;
if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
- int func_offset = ENTRYPOINT_OFFSET(pLmul);
+ int func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
FlushAllRegs();
CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
rl_result = GetReturnWide(false);
@@ -956,7 +956,7 @@
// Get the array's class.
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
LoadValueDirectFixed(rl_array, r_array); // Reload array
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 40db2c6..ebe10bb 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -17,8 +17,8 @@
#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "mirror/array.h"
-#include "oat/runtime/oat_support_entrypoints.h"
#include "verifier/method_verifier.h"
namespace art {
@@ -211,9 +211,9 @@
int func_offset;
if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
type_idx)) {
- func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCode);
} else {
- func_offset= ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
+ func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
}
CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
RegLocation rl_result = GetReturn(false);
@@ -233,9 +233,9 @@
int func_offset;
if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
type_idx)) {
- func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
} else {
- func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
}
CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
FreeTemp(TargetReg(kArg2));
@@ -375,7 +375,7 @@
// TUNING: fast path should fall through
LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
LoadConstant(TargetReg(kArg0), ssb_index);
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
if (cu_->instruction_set == kMips) {
// For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
OpRegCopy(rBase, TargetReg(kRet0));
@@ -408,9 +408,9 @@
FreeTemp(rBase);
} else {
FlushAllRegs(); // Everything to home locations
- int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) :
- (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic)
- : ENTRYPOINT_OFFSET(pSet32Static));
+ int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
+ : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
}
}
@@ -455,7 +455,7 @@
// or NULL if not initialized. Check for NULL and call helper if NULL.
// TUNING: fast path should fall through
LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
if (cu_->instruction_set == kMips) {
// For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
OpRegCopy(rBase, TargetReg(kRet0));
@@ -483,9 +483,9 @@
}
} else {
FlushAllRegs(); // Everything to home locations
- int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) :
- (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic)
- : ENTRYPOINT_OFFSET(pGet32Static));
+ int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
+ : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
CallRuntimeHelperImm(getterOffset, field_idx, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -499,7 +499,7 @@
void Mir2Lir::HandleSuspendLaunchPads() {
int num_elems = suspend_launchpads_.Size();
- int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
+ int helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspendFromCode);
for (int i = 0; i < num_elems; i++) {
ResetRegPool();
ResetDefTracking();
@@ -545,7 +545,7 @@
bool target_x86 = (cu_->instruction_set == kX86);
switch (lab->operands[0]) {
case kThrowNullPointer:
- func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
break;
case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index
// v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads.
@@ -557,7 +557,7 @@
// Make sure the following LoadConstant doesn't mess with kArg1.
LockTemp(TargetReg(kArg1));
LoadConstant(TargetReg(kArg0), v2);
- func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
break;
case kThrowArrayBounds:
// Move v1 (array index) to kArg0 and v2 (array length) to kArg1
@@ -590,18 +590,18 @@
OpRegCopy(TargetReg(kArg0), v1);
}
}
- func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
break;
case kThrowDivZero:
- func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
break;
case kThrowNoSuchMethod:
OpRegCopy(TargetReg(kArg0), v1);
func_offset =
- ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
+ QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
break;
case kThrowStackOverflow:
- func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
// Restore stack alignment
if (target_x86) {
OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
@@ -664,9 +664,9 @@
StoreValue(rl_dest, rl_result);
}
} else {
- int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) :
- (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance)
- : ENTRYPOINT_OFFSET(pGet32Instance));
+ int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
+ : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -719,9 +719,9 @@
}
}
} else {
- int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) :
- (is_object ? ENTRYPOINT_OFFSET(pSetObjInstance)
- : ENTRYPOINT_OFFSET(pSet32Instance));
+ int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
+ : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
}
}
@@ -735,7 +735,7 @@
type_idx)) {
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, rl_method.low_reg, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
@@ -764,7 +764,7 @@
// TUNING: move slow path to end & remove unconditional branch
LIR* target1 = NewLIR0(kPseudoTargetLabel);
// Call out to helper, which will return resolved type in kArg0
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
rl_method.low_reg, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
@@ -797,7 +797,7 @@
LoadWordDisp(TargetReg(kArg2),
mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
// Might call out to helper, which will return resolved string in kRet0
- int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode));
+ int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode));
LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
LoadConstant(TargetReg(kArg1), string_idx);
if (cu_->instruction_set == kThumb2) {
@@ -821,7 +821,8 @@
branch->target = target;
} else {
DCHECK_EQ(cu_->instruction_set, kX86);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2),
+ TargetReg(kArg1), true);
}
GenBarrier();
StoreValue(rl_dest, GetReturn(false));
@@ -847,9 +848,9 @@
int func_offset;
if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
cu_->method_idx, *cu_->dex_file, type_idx)) {
- func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCode);
} else {
- func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
}
CallRuntimeHelperImmMethod(func_offset, type_idx, true);
RegLocation rl_result = GetReturn(false);
@@ -858,7 +859,7 @@
void Mir2Lir::GenThrow(RegLocation rl_src) {
FlushAllRegs();
- CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
+ CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}
// For final classes there are no sub-classes to check and so we can answer the instance-of
@@ -928,7 +929,7 @@
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
@@ -950,7 +951,7 @@
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
// Not resolved
// Call out to helper, which will return resolved type in kRet0
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* reload Ref */
// Rejoin code paths
@@ -985,7 +986,7 @@
}
} else {
if (cu_->instruction_set == kThumb2) {
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
if (!type_known_abstract) {
/* Uses conditional nullification */
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
@@ -1002,13 +1003,13 @@
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
if (cu_->instruction_set != kX86) {
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
FreeTemp(r_tgt);
} else {
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
- OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
}
}
}
@@ -1068,7 +1069,7 @@
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kRet0
// InitializeTypeAndVerifyAccess(idx, method)
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
} else if (use_declaring_class) {
@@ -1088,8 +1089,8 @@
// Not resolved
// Call out to helper, which will return resolved type in kArg0
// InitializeTypeFromCode(idx, method)
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
- true);
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+ TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
// Rejoin code paths
LIR* hop_target = NewLIR0(kPseudoTargetLabel);
@@ -1108,8 +1109,8 @@
if (!type_known_abstract) {
branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
}
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2),
- true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1),
+ TargetReg(kArg2), true);
/* branch target here */
LIR* target = NewLIR0(kPseudoTargetLabel);
branch1->target = target;
@@ -1172,15 +1173,15 @@
switch (opcode) {
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
- func_offset = ENTRYPOINT_OFFSET(pShlLong);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong);
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
- func_offset = ENTRYPOINT_OFFSET(pShrLong);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong);
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
- func_offset = ENTRYPOINT_OFFSET(pUshrLong);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong);
break;
default:
LOG(FATAL) << "Unexpected case";
@@ -1302,7 +1303,7 @@
}
rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
} else {
- int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
FlushAllRegs(); /* Send everything to home location */
LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
int r_tgt = CallHelperSetup(func_offset);
@@ -1557,7 +1558,7 @@
FlushAllRegs(); /* Everything to home location */
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
Clobber(TargetReg(kArg0));
- int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
if (is_div)
rl_result = GetReturn(false);
@@ -1634,7 +1635,7 @@
} else {
call_out = true;
ret_reg = TargetReg(kRet0);
- func_offset = ENTRYPOINT_OFFSET(pLmul);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
}
break;
case Instruction::DIV_LONG:
@@ -1642,13 +1643,13 @@
call_out = true;
check_zero = true;
ret_reg = TargetReg(kRet0);
- func_offset = ENTRYPOINT_OFFSET(pLdiv);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
break;
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
call_out = true;
check_zero = true;
- func_offset = ENTRYPOINT_OFFSET(pLdivmod);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pLdivmod);
/* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
break;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index cae1319..1b34e99 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -16,11 +16,11 @@
#include "dex/compiler_ir.h"
#include "dex_file-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/string.h"
#include "mir_to_lir-inl.h"
-#include "oat/runtime/oat_support_entrypoints.h"
#include "x86/codegen_x86.h"
namespace art {
@@ -471,7 +471,7 @@
direct_method = 0;
}
int trampoline = (cu->instruction_set == kX86) ? 0
- : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
+ : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
if (direct_method != 0) {
switch (state) {
@@ -555,7 +555,7 @@
uint32_t method_idx,
uintptr_t unused, uintptr_t unused2,
InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -563,7 +563,7 @@
const MethodReference& target_method,
uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -571,7 +571,7 @@
const MethodReference& target_method,
uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -579,7 +579,7 @@
const MethodReference& target_method,
uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -589,7 +589,7 @@
uint32_t unused,
uintptr_t unused2, uintptr_t unused3,
InvokeType unused4) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -773,14 +773,14 @@
// Generate memcpy
OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
- CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
} else {
if (info->num_arg_words >= 20) {
// Generate memcpy
OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
- CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
} else {
// Use vldm/vstm pair using kArg3 as a temp
@@ -1047,7 +1047,7 @@
} else {
LoadValueDirectFixed(rl_start, reg_start);
}
- int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
+ int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0;
GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
intrinsic_launchpads_.Insert(launch_pad);
@@ -1056,7 +1056,7 @@
if (cu_->instruction_set != kX86) {
OpReg(kOpBlx, r_tgt);
} else {
- OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf));
}
LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
@@ -1084,7 +1084,7 @@
LoadValueDirectFixed(rl_this, reg_this);
LoadValueDirectFixed(rl_cmp, reg_cmp);
int r_tgt = (cu_->instruction_set != kX86) ?
- LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
+ LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
// TUNING: check if rl_cmp.s_reg_low is already null checked
LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
@@ -1094,7 +1094,7 @@
if (cu_->instruction_set != kX86) {
OpReg(kOpBlx, r_tgt);
} else {
- OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
}
launch_pad->operands[2] = 0; // No return possible
// Record that we've already inlined & null checked
@@ -1409,20 +1409,20 @@
int trampoline = 0;
switch (info->type) {
case kInterface:
- trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
- : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+ trampoline = fast_path ? QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
+ : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
break;
case kDirect:
- trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
break;
case kStatic:
- trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
break;
case kSuper:
- trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
break;
case kVirtual:
- trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
break;
default:
LOG(FATAL) << "Unexpected invoke type";
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 8b375ea..846c055 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -18,8 +18,8 @@
#include "codegen_mips.h"
#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "mips_lir.h"
-#include "oat/runtime/oat_support_entrypoints.h"
namespace art {
@@ -247,7 +247,7 @@
GenBarrier();
NewLIR0(kMipsCurrPC); // Really a jal to .+8
// Now, fill the branch delay slot with the helper load
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
GenBarrier(); // Scheduling barrier
// Construct BaseLabel and set up table base register
@@ -272,7 +272,7 @@
LockCallTemps(); // Prepare for explicit register usage
GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
// Go expensive route - artLockObjectFromCode(self, obj);
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pLockObjectFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode));
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, r_tgt);
MarkSafepointPC(call_inst);
@@ -287,7 +287,7 @@
LockCallTemps(); // Prepare for explicit register usage
GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
// Go expensive route - UnlockObjectFromCode(obj);
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, r_tgt);
MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 6cd9acc..3203017 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -16,8 +16,8 @@
#include "codegen_mips.h"
#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "mips_lir.h"
-#include "oat/runtime/oat_support_entrypoints.h"
namespace art {
@@ -50,7 +50,8 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
return;
@@ -92,7 +93,8 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
return;
@@ -133,22 +135,22 @@
op = kMipsFcvtdw;
break;
case Instruction::FLOAT_TO_INT:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_INT:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
return;
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -178,18 +180,18 @@
switch (opcode) {
case Instruction::CMPL_FLOAT:
- offset = ENTRYPOINT_OFFSET(pCmplFloat);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmplFloat);
wide = false;
break;
case Instruction::CMPG_FLOAT:
- offset = ENTRYPOINT_OFFSET(pCmpgFloat);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmpgFloat);
wide = false;
break;
case Instruction::CMPL_DOUBLE:
- offset = ENTRYPOINT_OFFSET(pCmplDouble);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmplDouble);
break;
case Instruction::CMPG_DOUBLE:
- offset = ENTRYPOINT_OFFSET(pCmpgDouble);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmpgDouble);
break;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index ea7da60..bd044c6 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -18,9 +18,9 @@
#include "codegen_mips.h"
#include "dex/quick/mir_to_lir-inl.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "mips_lir.h"
#include "mirror/array.h"
-#include "oat/runtime/oat_support_entrypoints.h"
namespace art {
@@ -579,7 +579,7 @@
// Get the array's class.
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
LoadValueDirectFixed(rl_array, r_array); // Reload array
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d530a1c..1c395de 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -148,7 +148,7 @@
NewLIR1(kX86StartOfMethod, rX86_ARG2);
NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
rX86_ARG1, true);
}
@@ -165,7 +165,7 @@
NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
// If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
- CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+ CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
branch->target = NewLIR0(kPseudoTargetLabel);
}
@@ -185,7 +185,7 @@
LIR* branch2 = NewLIR1(kX86Jmp8, 0);
branch->target = NewLIR0(kPseudoTargetLabel);
// Otherwise, go the expensive route - UnlockObjectFromCode(obj);
- CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+ CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
branch2->target = NewLIR0(kPseudoTargetLabel);
}
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index cc6f374..f736b5e 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -49,7 +49,8 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
return;
@@ -99,7 +100,8 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
return;
@@ -196,17 +198,17 @@
return;
}
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
// TODO: inline by using memory as a 64-bit source. Be careful about promoted registers.
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
return;
default:
LOG(INFO) << "Unexpected opcode: " << opcode;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 3be24df..0b4b4be 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -532,7 +532,7 @@
// Get the array's class.
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
LoadValueDirectFixed(rl_array, r_array); // Reload array
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 038957e..49aba4d 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -495,7 +495,7 @@
void CompilerDriver::CompileAll(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- TimingLogger& timings) {
+ base::TimingLogger& timings) {
DCHECK(!Runtime::Current()->IsStarted());
UniquePtr<ThreadPool> thread_pool(new ThreadPool(thread_count_));
PreCompile(class_loader, dex_files, *thread_pool.get(), timings);
@@ -528,7 +528,7 @@
return klass->IsVerified();
}
-void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, TimingLogger& timings) {
+void CompilerDriver::CompileOne(const mirror::AbstractMethod* method, base::TimingLogger& timings) {
DCHECK(!Runtime::Current()->IsStarted());
Thread* self = Thread::Current();
jobject jclass_loader;
@@ -572,7 +572,7 @@
}
void CompilerDriver::Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool& thread_pool, base::TimingLogger& timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -581,7 +581,7 @@
}
void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool& thread_pool, base::TimingLogger& timings) {
LoadImageClasses(timings);
Resolve(class_loader, dex_files, thread_pool, timings);
@@ -666,12 +666,13 @@
}
// Make a list of descriptors for classes to include in the image
-void CompilerDriver::LoadImageClasses(TimingLogger& timings)
+void CompilerDriver::LoadImageClasses(base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_) {
if (image_classes_.get() == NULL) {
return;
}
+ timings.NewSplit("LoadImageClasses");
// Make a first class to load all classes explicitly listed in the file
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
@@ -726,7 +727,6 @@
class_linker->VisitClasses(RecordImageClassesVisitor, image_classes_.get());
CHECK_NE(image_classes_->size(), 0U);
- timings.AddSplit("LoadImageClasses");
}
static void MaybeAddToImageClasses(mirror::Class* klass, CompilerDriver::DescriptorSet* image_classes)
@@ -758,11 +758,13 @@
MaybeAddToImageClasses(object->GetClass(), compiler_driver->image_classes_.get());
}
-void CompilerDriver::UpdateImageClasses(TimingLogger& timings) {
+void CompilerDriver::UpdateImageClasses(base::TimingLogger& timings) {
if (image_classes_.get() == NULL) {
return;
}
+ timings.NewSplit("UpdateImageClasses");
+
// Update image_classes_ with classes for objects created by <clinit> methods.
Thread* self = Thread::Current();
const char* old_cause = self->StartAssertNoThreadSuspension("ImageWriter");
@@ -772,7 +774,6 @@
heap->FlushAllocStack();
heap->GetLiveBitmap()->Walk(FindClinitImageClassesCallback, this);
self->EndAssertNoThreadSuspension(old_cause);
- timings.AddSplit("UpdateImageClasses");
}
void CompilerDriver::RecordClassStatus(ClassReference ref, CompiledClass* compiled_class) {
@@ -1551,22 +1552,22 @@
}
void CompilerDriver::ResolveDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool& thread_pool, base::TimingLogger& timings) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: we could resolve strings here, although the string table is largely filled with class
// and method names.
+ timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " Types").c_str()));
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
context.ForAll(0, dex_file.NumTypeIds(), ResolveType, thread_count_);
- timings.AddSplit("Resolve " + dex_file.GetLocation() + " Types");
+ timings.NewSplit(strdup(("Resolve " + dex_file.GetLocation() + " MethodsAndFields").c_str()));
context.ForAll(0, dex_file.NumClassDefs(), ResolveClassFieldsAndMethods, thread_count_);
- timings.AddSplit("Resolve " + dex_file.GetLocation() + " MethodsAndFields");
}
void CompilerDriver::Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool& thread_pool, base::TimingLogger& timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -1620,11 +1621,11 @@
}
void CompilerDriver::VerifyDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool& thread_pool, base::TimingLogger& timings) {
+ timings.NewSplit(strdup(("Verify " + dex_file.GetLocation()).c_str()));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, class_loader, this, &dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), VerifyClass, thread_count_);
- timings.AddSplit("Verify " + dex_file.GetLocation());
}
static const char* class_initializer_black_list[] = {
@@ -2116,7 +2117,8 @@
}
void CompilerDriver::InitializeClasses(jobject jni_class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool& thread_pool, base::TimingLogger& timings) {
+ timings.NewSplit(strdup(("InitializeNoClinit " + dex_file.GetLocation()).c_str()));
#ifndef NDEBUG
for (size_t i = 0; i < arraysize(class_initializer_black_list); ++i) {
const char* descriptor = class_initializer_black_list[i];
@@ -2126,12 +2128,11 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
ParallelCompilationManager context(class_linker, jni_class_loader, this, &dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), InitializeClass, thread_count_);
- timings.AddSplit("InitializeNoClinit " + dex_file.GetLocation());
}
void CompilerDriver::InitializeClasses(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool& thread_pool, base::TimingLogger& timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -2140,7 +2141,7 @@
}
void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool& thread_pool, base::TimingLogger& timings) {
for (size_t i = 0; i != dex_files.size(); ++i) {
const DexFile* dex_file = dex_files[i];
CHECK(dex_file != NULL);
@@ -2220,10 +2221,10 @@
}
void CompilerDriver::CompileDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings) {
+ ThreadPool& thread_pool, base::TimingLogger& timings) {
+ timings.NewSplit(strdup(("Compile " + dex_file.GetLocation()).c_str()));
ParallelCompilationManager context(NULL, class_loader, this, &dex_file, thread_pool);
context.ForAll(0, dex_file.NumClassDefs(), CompilerDriver::CompileClass, thread_count_);
- timings.AddSplit("Compile " + dex_file.GetLocation());
}
void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
@@ -2239,18 +2240,8 @@
CHECK(compiled_method != NULL);
} else if ((access_flags & kAccAbstract) != 0) {
} else {
- // In small mode we only compile image classes.
- bool dont_compile = (Runtime::Current()->IsSmallMode() &&
- ((image_classes_.get() == NULL) || (image_classes_->size() == 0)));
-
- // Don't compile class initializers, ever.
- if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) {
- dont_compile = true;
- } else if (code_item->insns_size_in_code_units_ < Runtime::Current()->GetSmallModeMethodDexSizeLimit()) {
- // Do compile small methods.
- dont_compile = false;
- }
- if (!dont_compile) {
+ bool compile = verifier::MethodVerifier::IsCandidateForCompilation(code_item, access_flags);
+ if (compile) {
CompilerFn compiler = compiler_;
#ifdef ART_SEA_IR_MODE
bool use_sea = Runtime::Current()->IsSeaIRMode();
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index f3f72dd..a7a47ed 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -78,11 +78,11 @@
~CompilerDriver();
void CompileAll(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- TimingLogger& timings)
+ base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
// Compile a single Method
- void CompileOne(const mirror::AbstractMethod* method, TimingLogger& timings)
+ void CompileOne(const mirror::AbstractMethod* method, base::TimingLogger& timings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
InstructionSet GetInstructionSet() const {
@@ -284,42 +284,42 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool& thread_pool, base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
- void LoadImageClasses(TimingLogger& timings);
+ void LoadImageClasses(base::TimingLogger& timings);
// Attempt to resolve all type, methods, fields, and strings
// referenced from code in the dex file following PathClassLoader
// ordering semantics.
void Resolve(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool& thread_pool, base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void ResolveDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool& thread_pool, base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void Verify(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings);
+ ThreadPool& thread_pool, base::TimingLogger& timings);
void VerifyDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool& thread_pool, base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool& thread_pool, base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void InitializeClasses(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool& thread_pool, base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_, compiled_classes_lock_);
- void UpdateImageClasses(TimingLogger& timings);
+ void UpdateImageClasses(base::TimingLogger& timings);
static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files,
- ThreadPool& thread_pool, TimingLogger& timings);
+ ThreadPool& thread_pool, base::TimingLogger& timings);
void CompileDexFile(jobject class_loader, const DexFile& dex_file,
- ThreadPool& thread_pool, TimingLogger& timings)
+ ThreadPool& thread_pool, base::TimingLogger& timings)
LOCKS_EXCLUDED(Locks::mutator_lock_);
void CompileMethod(const DexFile::CodeItem* code_item, uint32_t access_flags,
InvokeType invoke_type, uint32_t class_def_idx, uint32_t method_idx,
diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc
index 78cacaf..8ee9cf6 100644
--- a/compiler/driver/compiler_driver_test.cc
+++ b/compiler/driver/compiler_driver_test.cc
@@ -36,7 +36,8 @@
class CompilerDriverTest : public CommonTest {
protected:
void CompileAll(jobject class_loader) LOCKS_EXCLUDED(Locks::mutator_lock_) {
- TimingLogger timings("CompilerDriverTest::CompileAll", false);
+ base::TimingLogger timings("CompilerDriverTest::CompileAll", false, false);
+ timings.StartSplit("CompileAll");
compiler_driver_->CompileAll(class_loader,
Runtime::Current()->GetCompileTimeClassPath(class_loader),
timings);
diff --git a/compiler/jni/quick/arm/calling_convention_arm.cc b/compiler/jni/quick/arm/calling_convention_arm.cc
index e9b09c5..9778293 100644
--- a/compiler/jni/quick/arm/calling_convention_arm.cc
+++ b/compiler/jni/quick/arm/calling_convention_arm.cc
@@ -16,7 +16,7 @@
#include "base/logging.h"
#include "calling_convention_arm.h"
-#include "oat/utils/arm/managed_register_arm.h"
+#include "utils/arm/managed_register_arm.h"
namespace art {
namespace arm {
diff --git a/compiler/jni/quick/calling_convention.h b/compiler/jni/quick/calling_convention.h
index d492b42..f2b7fd9 100644
--- a/compiler/jni/quick/calling_convention.h
+++ b/compiler/jni/quick/calling_convention.h
@@ -18,9 +18,9 @@
#define ART_COMPILER_JNI_QUICK_CALLING_CONVENTION_H_
#include <vector>
-#include "oat/utils/managed_register.h"
#include "stack_indirect_reference_table.h"
#include "thread.h"
+#include "utils/managed_register.h"
namespace art {
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index fa227f7..b069fbd 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -25,13 +25,13 @@
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "disassembler.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_internal.h"
-#include "oat/runtime/oat_support_entrypoints.h"
-#include "oat/utils/assembler.h"
-#include "oat/utils/managed_register.h"
-#include "oat/utils/arm/managed_register_arm.h"
-#include "oat/utils/mips/managed_register_mips.h"
-#include "oat/utils/x86/managed_register_x86.h"
+#include "utils/assembler.h"
+#include "utils/managed_register.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/mips/managed_register_mips.h"
+#include "utils/x86/managed_register_x86.h"
#include "thread.h"
#include "UniquePtr.h"
@@ -172,8 +172,8 @@
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
- : ENTRYPOINT_OFFSET(pJniMethodStart);
+ uintptr_t jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
FrameOffset locked_object_sirt_offset(0);
if (is_synchronized) {
@@ -304,13 +304,13 @@
uintptr_t jni_end;
if (reference_return) {
// Pass result.
- jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
- : ENTRYPOINT_OFFSET(pJniMethodEndWithReference);
+ jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReference);
SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
end_jni_conv->Next();
} else {
- jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized)
- : ENTRYPOINT_OFFSET(pJniMethodEnd);
+ jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(pJniMethodEnd);
}
// Pass saved local reference state.
if (end_jni_conv->IsCurrentParamOnStack()) {
diff --git a/compiler/jni/quick/mips/calling_convention_mips.cc b/compiler/jni/quick/mips/calling_convention_mips.cc
index 053ab44..0a48500 100644
--- a/compiler/jni/quick/mips/calling_convention_mips.cc
+++ b/compiler/jni/quick/mips/calling_convention_mips.cc
@@ -17,7 +17,7 @@
#include "calling_convention_mips.h"
#include "base/logging.h"
-#include "oat/utils/mips/managed_register_mips.h"
+#include "utils/mips/managed_register_mips.h"
namespace art {
namespace mips {
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index 45dd429..8b5c86d 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -17,7 +17,7 @@
#include "calling_convention_x86.h"
#include "base/logging.h"
-#include "oat/utils/x86/managed_register_x86.h"
+#include "utils/x86/managed_register_x86.h"
#include "utils.h"
namespace art {
diff --git a/compiler/llvm/runtime_support_builder.cc b/compiler/llvm/runtime_support_builder.cc
index 7299803..24e283d 100644
--- a/compiler/llvm/runtime_support_builder.cc
+++ b/compiler/llvm/runtime_support_builder.cc
@@ -20,6 +20,7 @@
#include "ir_builder.h"
#include "monitor.h"
#include "mirror/object.h"
+#include "runtime_support_llvm_func_list.h"
#include "thread.h"
#include <llvm/IR/DerivedTypes.h>
@@ -47,10 +48,7 @@
runtime_support_func_decls_[runtime_support::ID] = fn; \
} while (0);
-#include "runtime_support_llvm_func_list.h"
RUNTIME_SUPPORT_FUNC_LIST(GET_RUNTIME_SUPPORT_FUNC_DECL)
-#undef RUNTIME_SUPPORT_FUNC_LIST
-#undef GET_RUNTIME_SUPPORT_FUNC_DECL
}
diff --git a/compiler/llvm/runtime_support_llvm_func.h b/compiler/llvm/runtime_support_llvm_func.h
index 2634c68..a5ad852 100644
--- a/compiler/llvm/runtime_support_llvm_func.h
+++ b/compiler/llvm/runtime_support_llvm_func.h
@@ -17,16 +17,15 @@
#ifndef ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_H_
#define ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_H_
+#include "runtime_support_llvm_func_list.h"
+
namespace art {
namespace llvm {
namespace runtime_support {
enum RuntimeId {
#define DEFINE_RUNTIME_SUPPORT_FUNC_ID(ID, NAME) ID,
-#include "runtime_support_llvm_func_list.h"
RUNTIME_SUPPORT_FUNC_LIST(DEFINE_RUNTIME_SUPPORT_FUNC_ID)
-#undef RUNTIME_SUPPORT_FUNC_LIST
-#undef DEFINE_RUNTIME_SUPPORT_FUNC_ID
MAX_ID
};
diff --git a/runtime/runtime_support_llvm_func_list.h b/compiler/llvm/runtime_support_llvm_func_list.h
similarity index 94%
rename from runtime/runtime_support_llvm_func_list.h
rename to compiler/llvm/runtime_support_llvm_func_list.h
index 8b635cb..b5ac1ff 100644
--- a/runtime/runtime_support_llvm_func_list.h
+++ b/compiler/llvm/runtime_support_llvm_func_list.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_
-#define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_
+#ifndef ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_
+#define ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_
#define RUNTIME_SUPPORT_FUNC_LIST(V) \
V(LockObject, art_portable_lock_object_from_code) \
@@ -78,5 +78,4 @@
V(JniMethodEndWithReference, art_portable_jni_method_end_with_reference) \
V(JniMethodEndWithReferenceSynchronized, art_portable_jni_method_end_with_reference_synchronized)
-#endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_
-#undef ART_RUNTIME_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_ // the guard in this file is just for cpplint
+#endif // ART_COMPILER_LLVM_RUNTIME_SUPPORT_LLVM_FUNC_LIST_H_
diff --git a/compiler/sea_ir/code_gen.cc b/compiler/sea_ir/code_gen.cc
index f359849..a513907 100644
--- a/compiler/sea_ir/code_gen.cc
+++ b/compiler/sea_ir/code_gen.cc
@@ -66,7 +66,8 @@
std::vector<llvm::Type*> parameter_types(parameters->size(),
llvm::Type::getInt32Ty(*llvm_data_->context_));
// Build llvm function name.
- std::string function_name = art::StringPrintf("class=%d_method=%d", graph->class_def_idx_, graph->method_idx_);
+ std::string function_name = art::StringPrintf(
+ "class=%d_method=%d", graph->class_def_idx_, graph->method_idx_);
// Build llvm function type and parameters.
llvm::FunctionType *function_type = llvm::FunctionType::get(
@@ -259,15 +260,18 @@
void CodeGenVisitor::Visit(SignatureNode* signature) {
std::cout << "Signature: ;" << "Id:" << signature->StringId() << std::endl;
- DCHECK_EQ(signature->GetDefinitions().size(), 1u) << "Signature nodes must correspond to a single parameter register.";
+ DCHECK_EQ(signature->GetDefinitions().size(), 1u) <<
+ "Signature nodes must correspond to a single parameter register.";
}
void CodeGenPrepassVisitor::Visit(SignatureNode* signature) {
std::cout << "Signature: ;" << "Id:" << signature->StringId() << std::endl;
- DCHECK_EQ(signature->GetDefinitions().size(), 1u) << "Signature nodes must correspond to a single parameter register.";
+ DCHECK_EQ(signature->GetDefinitions().size(), 1u) <<
+ "Signature nodes must correspond to a single parameter register.";
}
void CodeGenPostpassVisitor::Visit(SignatureNode* signature) {
std::cout << "Signature: ;" << "Id:" << signature->StringId() << std::endl;
- DCHECK_EQ(signature->GetDefinitions().size(), 1u) << "Signature nodes must correspond to a single parameter register.";
+ DCHECK_EQ(signature->GetDefinitions().size(), 1u) <<
+ "Signature nodes must correspond to a single parameter register.";
}
} // namespace sea_ir
diff --git a/compiler/sea_ir/frontend.cc b/compiler/sea_ir/frontend.cc
index 8fc1cf8..5843388 100644
--- a/compiler/sea_ir/frontend.cc
+++ b/compiler/sea_ir/frontend.cc
@@ -40,7 +40,7 @@
// NOTE: Instead of keeping the convention from the Dalvik frontend.cc
// and silencing the cpplint.py warning, I just corrected the formatting.
VLOG(compiler) << "Compiling " << PrettyMethod(method_idx, dex_file) << "...";
- sea_ir::SeaGraph* sg = sea_ir::SeaGraph::GetCurrentGraph();
+ sea_ir::SeaGraph* sg = sea_ir::SeaGraph::GetCurrentGraph(dex_file);
sg->CompileMethod(code_item, class_def_idx, method_idx, dex_file);
sg->DumpSea("/tmp/temp.dot");
CHECK(0 && "No SEA compiled function exists yet.");
@@ -57,8 +57,8 @@
jobject class_loader,
const DexFile& dex_file,
llvm::LlvmCompilationUnit* llvm_compilation_unit) {
- return CompileMethodWithSeaIr(compiler, backend, code_item, access_flags, invoke_type, class_def_idx,
- method_idx, class_loader, dex_file
+ return CompileMethodWithSeaIr(compiler, backend, code_item, access_flags, invoke_type,
+ class_def_idx, method_idx, class_loader, dex_file
#if defined(ART_USE_PORTABLE_COMPILER)
, llvm_compilation_unit
#endif
@@ -71,7 +71,8 @@
uint32_t access_flags, art::InvokeType invoke_type,
uint32_t class_def_idx, uint32_t method_idx, jobject class_loader,
const art::DexFile& dex_file) {
- // TODO: check method fingerprint here to determine appropriate backend type. Until then, use build default
+ // TODO: Check method fingerprint here to determine appropriate backend type.
+ // Until then, use build default
art::CompilerBackend backend = compiler.GetCompilerBackend();
return art::SeaIrCompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
class_def_idx, method_idx, class_loader, dex_file,
diff --git a/compiler/sea_ir/instruction_nodes.h b/compiler/sea_ir/instruction_nodes.h
index 5c9cfe1..6f9bddd 100644
--- a/compiler/sea_ir/instruction_nodes.h
+++ b/compiler/sea_ir/instruction_nodes.h
@@ -50,13 +50,14 @@
// Returns the set of register numbers that are used by the instruction.
virtual std::vector<int> GetUses();
// Appends to @result the .dot string representation of the instruction.
- virtual void ToDot(std::string& result) const;
+ virtual void ToDot(std::string& result, const art::DexFile& dex_file) const;
// Mark the current instruction as a downward exposed definition.
void MarkAsDEDef();
// Rename the use of @reg_no to refer to the instruction @definition,
// essentially creating SSA form.
void RenameToSSA(int reg_no, InstructionNode* definition) {
definition_edges_.insert(std::pair<int, InstructionNode*>(reg_no, definition));
+ definition->AddSSAUse(this);
}
// Returns the ordered set of Instructions that define the input operands of this instruction.
// Precondition: SeaGraph.ConvertToSSA().
@@ -69,6 +70,10 @@
return ssa_uses;
}
+ virtual void AddSSAUse(InstructionNode* use) {
+ used_in_.push_back(use);
+ }
+
void Accept(IRVisitor* v) {
v->Visit(this);
v->Traverse(this);
@@ -85,11 +90,14 @@
protected:
explicit InstructionNode(const art::Instruction* in):
- SeaNode(), instruction_(in), de_def_(false), region_(NULL) { }
+ SeaNode(), instruction_(in), used_in_(), de_def_(false), region_(NULL) { }
+ void ToDotSSAEdges(std::string& result) const;
protected:
const art::Instruction* const instruction_;
std::map<int, InstructionNode* > definition_edges_;
+ // Stores pointers to instructions that use the result of the current instruction.
+ std::vector<InstructionNode*> used_in_;
bool de_def_;
Region* region_;
};
@@ -126,7 +134,7 @@
return value_;
}
- void ToDot(std::string& result) const {
+ void ToDot(std::string& result, const art::DexFile& dex_file) const {
std::ostringstream sstream;
sstream << GetConstValue();
const std::string value_as_string(sstream.str());
@@ -136,17 +144,7 @@
result += "style=bold";
}
result += "];\n";
- // SSA definitions:
- for (std::map<int, InstructionNode* >::const_iterator def_it = definition_edges_.begin();
- def_it != definition_edges_.end(); def_it++) {
- if (NULL != def_it->second) {
- result += def_it->second->StringId() + " -> " + StringId() +"[color=red,label=\"";
- std::stringstream ss;
- ss << def_it->first;
- result.append(ss.str());
- result += "\"] ; // ssa edge\n";
- }
- }
+ ToDotSSAEdges(result);
}
private:
diff --git a/compiler/sea_ir/sea.cc b/compiler/sea_ir/sea.cc
index 3488afd..99b21f8 100644
--- a/compiler/sea_ir/sea.cc
+++ b/compiler/sea_ir/sea.cc
@@ -27,7 +27,6 @@
namespace sea_ir {
-SeaGraph SeaGraph::graph_;
int SeaNode::current_max_node_id_ = 0;
void IRVisitor::Traverse(Region* region) {
@@ -51,16 +50,16 @@
}
}
-SeaGraph* SeaGraph::GetCurrentGraph() {
- return &sea_ir::SeaGraph::graph_;
+SeaGraph* SeaGraph::GetCurrentGraph(const art::DexFile& dex_file) {
+ return new SeaGraph(dex_file);
}
void SeaGraph::DumpSea(std::string filename) const {
LOG(INFO) << "Starting to write SEA string to file.";
std::string result;
- result += "digraph seaOfNodes {\n";
+ result += "digraph seaOfNodes {\ncompound=true\n";
for (std::vector<Region*>::const_iterator cit = regions_.begin(); cit != regions_.end(); cit++) {
- (*cit)->ToDot(result);
+ (*cit)->ToDot(result, dex_file_);
}
result += "}\n";
art::File* file = art::OS::OpenFile(filename.c_str(), true, true);
@@ -238,7 +237,8 @@
sea_ir::InstructionNode* node = NULL;
while (i < size_in_code_units) {
const art::Instruction* inst = art::Instruction::At(&code[i]);
- std::vector<InstructionNode*> sea_instructions_for_dalvik = sea_ir::InstructionNode::Create(inst);
+ std::vector<InstructionNode*> sea_instructions_for_dalvik =
+ sea_ir::InstructionNode::Create(inst);
for (std::vector<InstructionNode*>::const_iterator cit = sea_instructions_for_dalvik.begin();
sea_instructions_for_dalvik.end() != cit; ++cit) {
last_node = node;
@@ -250,7 +250,6 @@
DCHECK(it != target_regions.end());
AddEdge(r, it->second); // Add edge to branch target.
}
-
std::map<const uint16_t*, Region*>::iterator it = target_regions.find(&code[i]);
if (target_regions.end() != it) {
// Get the already created region because this is a branch target.
@@ -332,7 +331,8 @@
int global = *globals_it;
// Copy the set, because we will modify the worklist as we go.
std::set<Region*> worklist((*(blocks.find(global))).second);
- for (std::set<Region*>::const_iterator b_it = worklist.begin(); b_it != worklist.end(); b_it++) {
+ for (std::set<Region*>::const_iterator b_it = worklist.begin();
+ b_it != worklist.end(); b_it++) {
std::set<Region*>* df = (*b_it)->GetDominanceFrontier();
for (std::set<Region*>::const_iterator df_it = df->begin(); df_it != df->end(); df_it++) {
if ((*df_it)->InsertPhiFor(global)) {
@@ -490,53 +490,44 @@
return NULL;
}
-void Region::ToDot(std::string& result) const {
- result += "\n// Region: \n" + StringId() + " [label=\"region " + StringId() + "(rpo=";
+void Region::ToDot(std::string& result, const art::DexFile& dex_file) const {
+ result += "\n// Region: \nsubgraph " + StringId() + " { label=\"region " + StringId() + "(rpo=";
result += art::StringPrintf("%d", rpo_number_);
if (NULL != GetIDominator()) {
result += " dom=" + GetIDominator()->StringId();
}
- result += ")\"];\n";
+ result += ")\";\n";
+
+ for (std::vector<PhiInstructionNode*>::const_iterator cit = phi_instructions_.begin();
+ cit != phi_instructions_.end(); cit++) {
+ result += (*cit)->StringId() +";\n";
+ }
+
+ for (std::vector<InstructionNode*>::const_iterator cit = instructions_.begin();
+ cit != instructions_.end(); cit++) {
+ result += (*cit)->StringId() +";\n";
+ }
+
+ result += "} // End Region.\n";
// Save phi-nodes.
for (std::vector<PhiInstructionNode*>::const_iterator cit = phi_instructions_.begin();
cit != phi_instructions_.end(); cit++) {
- (*cit)->ToDot(result);
- result += StringId() + " -> " + (*cit)->StringId() + "; // phi-function \n";
+ (*cit)->ToDot(result, dex_file);
}
// Save instruction nodes.
for (std::vector<InstructionNode*>::const_iterator cit = instructions_.begin();
cit != instructions_.end(); cit++) {
- (*cit)->ToDot(result);
- result += StringId() + " -> " + (*cit)->StringId() + "; // region -> instruction \n";
+ (*cit)->ToDot(result, dex_file);
}
for (std::vector<Region*>::const_iterator cit = successors_.begin(); cit != successors_.end();
cit++) {
DCHECK(NULL != *cit) << "Null successor found for SeaNode" << GetLastChild()->StringId() << ".";
- result += GetLastChild()->StringId() + " -> " + (*cit)->StringId() + ";\n\n";
+ result += GetLastChild()->StringId() + " -> " + (*cit)->GetLastChild()->StringId() +
+ "[lhead=" + (*cit)->StringId() + ", " + "ltail=" + StringId() + "];\n\n";
}
- // Save reaching definitions.
- for (std::map<int, std::set<sea_ir::InstructionNode*>* >::const_iterator cit =
- reaching_defs_.begin();
- cit != reaching_defs_.end(); cit++) {
- for (std::set<sea_ir::InstructionNode*>::const_iterator
- reaching_set_it = (*cit).second->begin();
- reaching_set_it != (*cit).second->end();
- reaching_set_it++) {
- result += (*reaching_set_it)->StringId() +
- " -> " + StringId() +
- " [style=dotted]; // Reaching def.\n";
- }
- }
- // Save dominance frontier.
- for (std::set<Region*>::const_iterator cit = df_.begin(); cit != df_.end(); cit++) {
- result += StringId() +
- " -> " + (*cit)->StringId() +
- " [color=gray]; // Dominance frontier.\n";
- }
- result += "// End Region.\n";
}
void Region::ComputeDownExposedDefs() {
@@ -570,7 +561,8 @@
pred_it != predecessors_.end(); pred_it++) {
// The reaching_defs variable will contain reaching defs __for current predecessor only__
std::map<int, std::set<sea_ir::InstructionNode*>* > reaching_defs;
- std::map<int, std::set<sea_ir::InstructionNode*>* >* pred_reaching = (*pred_it)->GetReachingDefs();
+ std::map<int, std::set<sea_ir::InstructionNode*>* >* pred_reaching =
+ (*pred_it)->GetReachingDefs();
const std::map<int, InstructionNode*>* de_defs = (*pred_it)->GetDownExposedDefs();
// The definitions from the reaching set of the predecessor
@@ -588,7 +580,8 @@
// Now we combine the reaching map coming from the current predecessor (reaching_defs)
// with the accumulated set from all predecessors so far (from new_reaching).
- std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = reaching_defs.begin();
+ std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it =
+ reaching_defs.begin();
for (; reaching_it != reaching_defs.end(); reaching_it++) {
std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator crt_entry =
new_reaching.find(reaching_it->first);
@@ -608,7 +601,8 @@
// TODO: Find formal proof.
int old_size = 0;
if (-1 == reaching_defs_size_) {
- std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it = reaching_defs_.begin();
+ std::map<int, std::set<sea_ir::InstructionNode*>*>::iterator reaching_it =
+ reaching_defs_.begin();
for (; reaching_it != reaching_defs_.end(); reaching_it++) {
old_size += (*reaching_it).second->size();
}
@@ -698,22 +692,36 @@
return sea_instructions;
}
-void InstructionNode::ToDot(std::string& result) const {
+void InstructionNode::ToDotSSAEdges(std::string& result) const {
+ // SSA definitions:
+ for (std::map<int, InstructionNode*>::const_iterator def_it = definition_edges_.begin();
+ def_it != definition_edges_.end(); def_it++) {
+ if (NULL != def_it->second) {
+ result += def_it->second->StringId() + " -> " + StringId() + "[color=gray,label=\"";
+ result += art::StringPrintf("vR = %d", def_it->first);
+ result += "\"] ; // ssa edge\n";
+ }
+ }
+
+ // SSA used-by:
+ if (DotConversion::SaveUseEdges()) {
+ for (std::vector<InstructionNode*>::const_iterator cit = used_in_.begin();
+ cit != used_in_.end(); cit++) {
+ result += (*cit)->StringId() + " -> " + StringId() + "[color=gray,label=\"";
+ result += "\"] ; // SSA used-by edge\n";
+ }
+ }
+}
+
+void InstructionNode::ToDot(std::string& result, const art::DexFile& dex_file) const {
result += "// Instruction ("+StringId()+"): \n" + StringId() +
- " [label=\"" + instruction_->DumpString(NULL) + "\"";
+ " [label=\"" + instruction_->DumpString(&dex_file) + "\"";
if (de_def_) {
result += "style=bold";
}
result += "];\n";
- // SSA definitions:
- for (std::map<int, InstructionNode* >::const_iterator def_it = definition_edges_.begin();
- def_it != definition_edges_.end(); def_it++) {
- if (NULL != def_it->second) {
- result += def_it->second->StringId() + " -> " + StringId() +"[color=red,label=\"";
- result += art::StringPrintf("%d", def_it->first);
- result += "\"] ; // ssa edge\n";
- }
- }
+
+ ToDotSSAEdges(result);
}
void InstructionNode::MarkAsDEDef() {
@@ -756,22 +764,12 @@
return uses;
}
-void PhiInstructionNode::ToDot(std::string& result) const {
+void PhiInstructionNode::ToDot(std::string& result, const art::DexFile& dex_file) const {
result += "// PhiInstruction: \n" + StringId() +
" [label=\"" + "PHI(";
result += art::StringPrintf("%d", register_no_);
result += ")\"";
result += "];\n";
-
- for (std::vector<std::vector<InstructionNode*>*>::const_iterator pred_it = definition_edges_.begin();
- pred_it != definition_edges_.end(); pred_it++) {
- std::vector<InstructionNode*>* defs_from_pred = *pred_it;
- for (std::vector<InstructionNode* >::const_iterator def_it = defs_from_pred->begin();
- def_it != defs_from_pred->end(); def_it++) {
- result += (*def_it)->StringId() + " -> " + StringId() +"[color=red,label=\"vR = ";
- result += art::StringPrintf("%d", GetRegisterNumber());
- result += "\"] ; // phi-ssa edge\n";
- }
- }
+ ToDotSSAEdges(result);
}
} // namespace sea_ir
diff --git a/compiler/sea_ir/sea.h b/compiler/sea_ir/sea.h
index 25ab1fe..5cb8424 100644
--- a/compiler/sea_ir/sea.h
+++ b/compiler/sea_ir/sea.h
@@ -35,6 +35,17 @@
VISITING = -2
};
+// Stores options for turning a SEA IR graph to a .dot file.
+class DotConversion {
+ public:
+ static bool SaveUseEdges() {
+ return save_use_edges_;
+ }
+
+ private:
+ static const bool save_use_edges_ = false; // TODO: Enable per-sea graph configuration.
+};
+
class Region;
class InstructionNode;
@@ -49,10 +60,11 @@
explicit SignatureNode(unsigned int parameter_register):InstructionNode(NULL),
parameter_register_(parameter_register) { }
- void ToDot(std::string& result) const {
+ void ToDot(std::string& result, const art::DexFile& dex_file) const {
result += StringId() +" [label=\"signature:";
result += art::StringPrintf("r%d", GetResultRegister());
result += "\"] // signature node\n";
+ ToDotSSAEdges(result);
}
int GetResultRegister() const {
@@ -77,7 +89,7 @@
explicit PhiInstructionNode(int register_no):
InstructionNode(NULL), register_no_(register_no), definition_edges_() {}
// Appends to @result the .dot string representation of the instruction.
- void ToDot(std::string& result) const;
+ void ToDot(std::string& result, const art::DexFile& dex_file) const;
// Returns the register on which this phi-function is used.
int GetRegisterNumber() const {
return register_no_;
@@ -98,6 +110,7 @@
definition_edges_[predecessor_id] = new std::vector<InstructionNode*>();
}
definition_edges_[predecessor_id]->push_back(definition);
+ definition->AddSSAUse(this);
}
// Returns the instruction that defines the phi register from predecessor
@@ -125,7 +138,9 @@
public:
explicit Region():
SeaNode(), successors_(), predecessors_(), reaching_defs_size_(0),
- rpo_number_(NOT_VISITED), idom_(NULL), idominated_set_(), df_(), phi_set_() {}
+ rpo_number_(NOT_VISITED), idom_(NULL), idominated_set_(), df_(), phi_set_() {
+ string_id_ = "cluster_" + string_id_;
+ }
// Adds @instruction as an instruction node child in the current region.
void AddChild(sea_ir::InstructionNode* instruction);
// Returns the last instruction node child of the current region.
@@ -138,7 +153,7 @@
// Appends to @result a dot language formatted string representing the node and
// (by convention) outgoing edges, so that the composition of theToDot() of all nodes
// builds a complete dot graph (without prolog and epilog though).
- virtual void ToDot(std::string& result) const;
+ virtual void ToDot(std::string& result, const art::DexFile& dex_file) const;
// Computes Downward Exposed Definitions for the current node.
void ComputeDownExposedDefs();
const std::map<int, sea_ir::InstructionNode*>* GetDownExposedDefs() const;
@@ -242,7 +257,7 @@
// and acts as starting point for visitors (ex: during code generation).
class SeaGraph: IVisitable {
public:
- static SeaGraph* GetCurrentGraph();
+ static SeaGraph* GetCurrentGraph(const art::DexFile&);
void CompileMethod(const art::DexFile::CodeItem* code_item,
uint32_t class_def_idx, uint32_t method_idx, const art::DexFile& dex_file);
@@ -264,7 +279,8 @@
uint32_t method_idx_;
private:
- SeaGraph(): class_def_idx_(0), method_idx_(0), regions_(), parameters_() {
+ explicit SeaGraph(const art::DexFile& df):
+ class_def_idx_(0), method_idx_(0), regions_(), parameters_(), dex_file_(df) {
}
// Registers @childReg as a region belonging to the SeaGraph instance.
void AddRegion(Region* childReg);
@@ -319,6 +335,7 @@
static SeaGraph graph_;
std::vector<Region*> regions_;
std::vector<SignatureNode*> parameters_;
+ const art::DexFile& dex_file_;
};
} // namespace sea_ir
#endif // ART_COMPILER_SEA_IR_SEA_H_
diff --git a/compiler/sea_ir/sea_node.h b/compiler/sea_ir/sea_node.h
index 5d28f8a..c13e5d6 100644
--- a/compiler/sea_ir/sea_node.h
+++ b/compiler/sea_ir/sea_node.h
@@ -30,7 +30,7 @@
};
// This abstract class provides the essential services that
-// we want each SEA IR element should have.
+// we want each SEA IR element to have.
// At the moment, these are:
// - an id and corresponding string representation.
// - a .dot graph language representation for .dot output.
@@ -42,6 +42,7 @@
explicit SeaNode():id_(GetNewId()), string_id_() {
string_id_ = art::StringPrintf("%d", id_);
}
+
// Adds CFG predecessors and successors to each block.
void AddSuccessor(Region* successor);
void AddPredecessor(Region* predecesor);
@@ -58,7 +59,7 @@
// Appends to @result a dot language formatted string representing the node and
// (by convention) outgoing edges, so that the composition of theToDot() of all nodes
// builds a complete dot graph, but without prolog ("digraph {") and epilog ("}").
- virtual void ToDot(std::string& result) const = 0;
+ virtual void ToDot(std::string& result, const art::DexFile& dex_file) const = 0;
virtual ~SeaNode() { }
diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc
index 69568d7..def43e2 100644
--- a/compiler/stubs/portable/stubs.cc
+++ b/compiler/stubs/portable/stubs.cc
@@ -16,11 +16,11 @@
#include "stubs/stubs.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_internal.h"
-#include "oat/utils/arm/assembler_arm.h"
-#include "oat/utils/mips/assembler_mips.h"
-#include "oat/utils/x86/assembler_x86.h"
-#include "oat/runtime/oat_support_entrypoints.h"
+#include "utils/arm/assembler_arm.h"
+#include "utils/mips/assembler_mips.h"
+#include "utils/x86/assembler_x86.h"
#include "stack_indirect_reference_table.h"
#include "sirt_ref.h"
@@ -34,7 +34,8 @@
RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR);
__ PushList(save);
- __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+ __ LoadFromOffset(kLoadWord, R12, TR,
+ PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
__ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3
__ mov(R2, ShifterOperand(SP)); // Pass sp for Method** callee_addr
__ IncreaseFrameSize(12); // 3 words of space for alignment
@@ -69,7 +70,7 @@
__ StoreToOffset(kStoreWord, A0, SP, 0);
__ LoadFromOffset(kLoadWord, T9, S1,
- ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+ PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
__ Move(A3, S1); // Pass Thread::Current() in A3
__ Move(A2, SP); // Pass SP for Method** callee_addr
__ Jalr(T9); // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
@@ -112,7 +113,7 @@
__ pushl(ECX); // pass receiver
__ pushl(EAX); // pass called
// Call to resolve method.
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
+ __ Call(ThreadOffset(PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
X86ManagedRegister::FromCpuRegister(ECX));
__ leave();
diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc
index 8fc2a81..912f1c0 100644
--- a/compiler/stubs/quick/stubs.cc
+++ b/compiler/stubs/quick/stubs.cc
@@ -16,11 +16,11 @@
#include "stubs/stubs.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_internal.h"
-#include "oat/runtime/oat_support_entrypoints.h"
-#include "oat/utils/arm/assembler_arm.h"
-#include "oat/utils/mips/assembler_mips.h"
-#include "oat/utils/x86/assembler_x86.h"
+#include "utils/arm/assembler_arm.h"
+#include "utils/mips/assembler_mips.h"
+#include "utils/x86/assembler_x86.h"
#include "sirt_ref.h"
#include "stack_indirect_reference_table.h"
@@ -46,7 +46,7 @@
// TODO: enable when GetCalleeSaveMethod is available at stub generation time
// DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask());
__ PushList(save);
- __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+ __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
__ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3
__ IncreaseFrameSize(8); // 2 words of space for alignment
__ mov(R2, ShifterOperand(SP)); // Pass SP
@@ -71,7 +71,7 @@
const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
- __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
__ bkpt(0);
size_t cs = assembler->CodeSize();
@@ -85,7 +85,7 @@
const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
- __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
+ __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
__ bkpt(0);
size_t cs = assembler->CodeSize();
@@ -123,7 +123,7 @@
__ StoreToOffset(kStoreWord, A2, SP, 8);
__ StoreToOffset(kStoreWord, A1, SP, 4);
- __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+ __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
__ Move(A3, S1); // Pass Thread::Current() in A3
__ Move(A2, SP); // Pass SP for Method** callee_addr
__ Jalr(T9); // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
@@ -161,7 +161,7 @@
const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
- __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
__ Jr(T9);
__ Break();
@@ -176,7 +176,7 @@
const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
- __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
__ Jr(T9);
__ Break();
@@ -208,7 +208,7 @@
__ pushl(EAX); // pass Method*
// Call to resolve method.
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
+ __ Call(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
X86ManagedRegister::FromCpuRegister(ECX));
__ movl(EDI, EAX); // save code pointer in EDI
@@ -236,7 +236,7 @@
const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
- __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
+ __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
size_t cs = assembler->CodeSize();
UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
@@ -249,7 +249,7 @@
const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
- __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
+ __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
size_t cs = assembler->CodeSize();
UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
diff --git a/runtime/oat/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
similarity index 99%
rename from runtime/oat/utils/arm/assembler_arm.cc
rename to compiler/utils/arm/assembler_arm.cc
index 960a60d..fa202c3 100644
--- a/runtime/oat/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -17,7 +17,7 @@
#include "assembler_arm.h"
#include "base/logging.h"
-#include "oat/runtime/oat_support_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"
#include "utils.h"
@@ -1884,7 +1884,7 @@
// Don't care about preserving R0 as this call won't return
__ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
// Set up call to Thread::Current()->pDeliverException
- __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException));
+ __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
__ blx(R12);
// Call never returns
__ bkpt(0);
diff --git a/runtime/oat/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
similarity index 98%
rename from runtime/oat/utils/arm/assembler_arm.h
rename to compiler/utils/arm/assembler_arm.h
index b8c79d21..757a8a2 100644
--- a/runtime/oat/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -14,15 +14,15 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_UTILS_ARM_ASSEMBLER_ARM_H_
-#define ART_RUNTIME_OAT_UTILS_ARM_ASSEMBLER_ARM_H_
+#ifndef ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
+#define ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
#include <vector>
#include "base/logging.h"
#include "constants_arm.h"
-#include "oat/utils/arm/managed_register_arm.h"
-#include "oat/utils/assembler.h"
+#include "utils/arm/managed_register_arm.h"
+#include "utils/assembler.h"
#include "offsets.h"
#include "utils.h"
@@ -656,4 +656,4 @@
} // namespace arm
} // namespace art
-#endif // ART_RUNTIME_OAT_UTILS_ARM_ASSEMBLER_ARM_H_
+#endif // ART_COMPILER_UTILS_ARM_ASSEMBLER_ARM_H_
diff --git a/runtime/constants_arm.h b/compiler/utils/arm/constants_arm.h
similarity index 91%
rename from runtime/constants_arm.h
rename to compiler/utils/arm/constants_arm.h
index bbb9242..cc795b1 100644
--- a/runtime/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -14,13 +14,14 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_CONSTANTS_ARM_H_
-#define ART_RUNTIME_CONSTANTS_ARM_H_
+#ifndef ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_
+#define ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_
#include <stdint.h>
#include <iosfwd>
+#include "arch/arm/registers_arm.h"
#include "base/casts.h"
#include "base/logging.h"
#include "globals.h"
@@ -47,36 +48,6 @@
#endif
-// Values for registers.
-enum Register {
- R0 = 0,
- R1 = 1,
- R2 = 2,
- R3 = 3,
- R4 = 4,
- R5 = 5,
- R6 = 6,
- R7 = 7,
- R8 = 8,
- R9 = 9,
- R10 = 10,
- R11 = 11,
- R12 = 12,
- R13 = 13,
- R14 = 14,
- R15 = 15,
- TR = 9, // thread register
- FP = 11,
- IP = 12,
- SP = 13,
- LR = 14,
- PC = 15,
- kNumberOfCoreRegisters = 16,
- kNoRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const Register& rhs);
-
-
enum ScaleFactor {
TIMES_1 = 0,
TIMES_2 = 1,
@@ -84,47 +55,6 @@
TIMES_8 = 3
};
-
-// Values for single-precision floating point registers.
-enum SRegister {
- S0 = 0,
- S1 = 1,
- S2 = 2,
- S3 = 3,
- S4 = 4,
- S5 = 5,
- S6 = 6,
- S7 = 7,
- S8 = 8,
- S9 = 9,
- S10 = 10,
- S11 = 11,
- S12 = 12,
- S13 = 13,
- S14 = 14,
- S15 = 15,
- S16 = 16,
- S17 = 17,
- S18 = 18,
- S19 = 19,
- S20 = 20,
- S21 = 21,
- S22 = 22,
- S23 = 23,
- S24 = 24,
- S25 = 25,
- S26 = 26,
- S27 = 27,
- S28 = 28,
- S29 = 29,
- S30 = 30,
- S31 = 31,
- kNumberOfSRegisters = 32,
- kNoSRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const SRegister& rhs);
-
-
// Values for double-precision floating point registers.
enum DRegister {
D0 = 0,
@@ -516,4 +446,4 @@
} // namespace arm
} // namespace art
-#endif // ART_RUNTIME_CONSTANTS_ARM_H_
+#endif // ART_COMPILER_UTILS_ARM_CONSTANTS_ARM_H_
diff --git a/runtime/oat/utils/arm/managed_register_arm.cc b/compiler/utils/arm/managed_register_arm.cc
similarity index 100%
rename from runtime/oat/utils/arm/managed_register_arm.cc
rename to compiler/utils/arm/managed_register_arm.cc
diff --git a/runtime/oat/utils/arm/managed_register_arm.h b/compiler/utils/arm/managed_register_arm.h
similarity index 97%
rename from runtime/oat/utils/arm/managed_register_arm.h
rename to compiler/utils/arm/managed_register_arm.h
index 01596bb..a496c87 100644
--- a/runtime/oat/utils/arm/managed_register_arm.h
+++ b/compiler/utils/arm/managed_register_arm.h
@@ -14,12 +14,12 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_
-#define ART_RUNTIME_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+#ifndef ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+#define ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_
#include "base/logging.h"
#include "constants_arm.h"
-#include "oat/utils/managed_register.h"
+#include "utils/managed_register.h"
namespace art {
namespace arm {
@@ -271,4 +271,4 @@
} // namespace art
-#endif // ART_RUNTIME_OAT_UTILS_ARM_MANAGED_REGISTER_ARM_H_
+#endif // ART_COMPILER_UTILS_ARM_MANAGED_REGISTER_ARM_H_
diff --git a/runtime/oat/utils/arm/managed_register_arm_test.cc b/compiler/utils/arm/managed_register_arm_test.cc
similarity index 100%
rename from runtime/oat/utils/arm/managed_register_arm_test.cc
rename to compiler/utils/arm/managed_register_arm_test.cc
diff --git a/runtime/oat/utils/assembler.cc b/compiler/utils/assembler.cc
similarity index 100%
rename from runtime/oat/utils/assembler.cc
rename to compiler/utils/assembler.cc
diff --git a/runtime/oat/utils/assembler.h b/compiler/utils/assembler.h
similarity index 98%
rename from runtime/oat/utils/assembler.h
rename to compiler/utils/assembler.h
index 05e2732..9d79002 100644
--- a/runtime/oat/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -14,16 +14,16 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_UTILS_ASSEMBLER_H_
-#define ART_RUNTIME_OAT_UTILS_ASSEMBLER_H_
+#ifndef ART_COMPILER_UTILS_ASSEMBLER_H_
+#define ART_COMPILER_UTILS_ASSEMBLER_H_
#include <vector>
#include "base/logging.h"
#include "base/macros.h"
-#include "constants_arm.h"
-#include "constants_mips.h"
-#include "constants_x86.h"
+#include "arm/constants_arm.h"
+#include "mips/constants_mips.h"
+#include "x86/constants_x86.h"
#include "instruction_set.h"
#include "managed_register.h"
#include "memory_region.h"
@@ -456,4 +456,4 @@
} // namespace art
-#endif // ART_RUNTIME_OAT_UTILS_ASSEMBLER_H_
+#endif // ART_COMPILER_UTILS_ASSEMBLER_H_
diff --git a/runtime/oat/utils/managed_register.h b/compiler/utils/managed_register.h
similarity index 91%
rename from runtime/oat/utils/managed_register.h
rename to compiler/utils/managed_register.h
index 4dd2acd..4ad1763 100644
--- a/runtime/oat/utils/managed_register.h
+++ b/compiler/utils/managed_register.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_UTILS_MANAGED_REGISTER_H_
-#define ART_RUNTIME_OAT_UTILS_MANAGED_REGISTER_H_
+#ifndef ART_COMPILER_UTILS_MANAGED_REGISTER_H_
+#define ART_COMPILER_UTILS_MANAGED_REGISTER_H_
namespace art {
@@ -69,4 +69,4 @@
} // namespace art
-#endif // ART_RUNTIME_OAT_UTILS_MANAGED_REGISTER_H_
+#endif // ART_COMPILER_UTILS_MANAGED_REGISTER_H_
diff --git a/runtime/oat/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
similarity index 97%
rename from runtime/oat/utils/mips/assembler_mips.cc
rename to compiler/utils/mips/assembler_mips.cc
index 25ba9b2..931d7ab 100644
--- a/runtime/oat/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -17,8 +17,8 @@
#include "assembler_mips.h"
#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
-#include "oat/runtime/oat_support_entrypoints.h"
#include "thread.h"
namespace art {
@@ -36,30 +36,6 @@
};
#endif
-static const char* kRegisterNames[] = {
- "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
- "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
- "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra",
-};
-std::ostream& operator<<(std::ostream& os, const Register& rhs) {
- if (rhs >= ZERO && rhs <= RA) {
- os << kRegisterNames[rhs];
- } else {
- os << "Register[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const FRegister& rhs) {
- if (rhs >= F0 && rhs < kNumberOfFRegisters) {
- os << "f" << static_cast<int>(rhs);
- } else {
- os << "FRegister[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
if (rhs >= D0 && rhs < kNumberOfDRegisters) {
os << "d" << static_cast<int>(rhs);
@@ -1012,7 +988,7 @@
// Don't care about preserving A0 as this call won't return
__ Move(A0, scratch_.AsCoreRegister());
// Set up call to Thread::Current()->pDeliverException
- __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException));
+ __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
__ Jr(T9);
// Call never returns
__ Break();
diff --git a/runtime/oat/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h
similarity index 98%
rename from runtime/oat/utils/mips/assembler_mips.h
rename to compiler/utils/mips/assembler_mips.h
index 8f4a33a..0f5f2fe 100644
--- a/runtime/oat/utils/mips/assembler_mips.h
+++ b/compiler/utils/mips/assembler_mips.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_UTILS_MIPS_ASSEMBLER_MIPS_H_
-#define ART_RUNTIME_OAT_UTILS_MIPS_ASSEMBLER_MIPS_H_
+#ifndef ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
+#define ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
#include <vector>
@@ -23,7 +23,7 @@
#include "constants_mips.h"
#include "globals.h"
#include "managed_register_mips.h"
-#include "oat/utils/assembler.h"
+#include "utils/assembler.h"
#include "offsets.h"
#include "utils.h"
@@ -504,4 +504,4 @@
} // namespace mips
} // namespace art
-#endif // ART_RUNTIME_OAT_UTILS_MIPS_ASSEMBLER_MIPS_H_
+#endif // ART_COMPILER_UTILS_MIPS_ASSEMBLER_MIPS_H_
diff --git a/runtime/constants_mips.h b/compiler/utils/mips/constants_mips.h
similarity index 61%
rename from runtime/constants_mips.h
rename to compiler/utils/mips/constants_mips.h
index fb56493..44ed5cc 100644
--- a/runtime/constants_mips.h
+++ b/compiler/utils/mips/constants_mips.h
@@ -14,11 +14,12 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_CONSTANTS_MIPS_H_
-#define ART_RUNTIME_CONSTANTS_MIPS_H_
+#ifndef ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_
+#define ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_
#include <iosfwd>
+#include "arch/mips/registers_mips.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"
@@ -26,83 +27,6 @@
namespace art {
namespace mips {
-enum Register {
- ZERO = 0,
- AT = 1, // Assembler temporary.
- V0 = 2, // Values.
- V1 = 3,
- A0 = 4, // Arguments.
- A1 = 5,
- A2 = 6,
- A3 = 7,
- T0 = 8, // Temporaries.
- T1 = 9,
- T2 = 10,
- T3 = 11,
- T4 = 12,
- T5 = 13,
- T6 = 14,
- T7 = 15,
- S0 = 16, // Saved values.
- S1 = 17,
- S2 = 18,
- S3 = 19,
- S4 = 20,
- S5 = 21,
- S6 = 22,
- S7 = 23,
- T8 = 24, // More temporaries.
- T9 = 25,
- K0 = 26, // Reserved for trap handler.
- K1 = 27,
- GP = 28, // Global pointer.
- SP = 29, // Stack pointer.
- FP = 30, // Saved value/frame pointer.
- RA = 31, // Return address.
- kNumberOfCoreRegisters = 32,
- kNoRegister = -1 // Signals an illegal register.
-};
-std::ostream& operator<<(std::ostream& os, const Register& rhs);
-
-// Values for single-precision floating point registers.
-enum FRegister {
- F0 = 0,
- F1 = 1,
- F2 = 2,
- F3 = 3,
- F4 = 4,
- F5 = 5,
- F6 = 6,
- F7 = 7,
- F8 = 8,
- F9 = 9,
- F10 = 10,
- F11 = 11,
- F12 = 12,
- F13 = 13,
- F14 = 14,
- F15 = 15,
- F16 = 16,
- F17 = 17,
- F18 = 18,
- F19 = 19,
- F20 = 20,
- F21 = 21,
- F22 = 22,
- F23 = 23,
- F24 = 24,
- F25 = 25,
- F26 = 26,
- F27 = 27,
- F28 = 28,
- F29 = 29,
- F30 = 30,
- F31 = 31,
- kNumberOfFRegisters = 32,
- kNoFRegister = -1,
-};
-std::ostream& operator<<(std::ostream& os, const FRegister& rhs);
-
// Values for double-precision floating point registers.
enum DRegister {
D0 = 0,
@@ -183,4 +107,4 @@
} // namespace mips
} // namespace art
-#endif // ART_RUNTIME_CONSTANTS_MIPS_H_
+#endif // ART_COMPILER_UTILS_MIPS_CONSTANTS_MIPS_H_
diff --git a/runtime/oat/utils/mips/managed_register_mips.cc b/compiler/utils/mips/managed_register_mips.cc
similarity index 100%
rename from runtime/oat/utils/mips/managed_register_mips.cc
rename to compiler/utils/mips/managed_register_mips.cc
diff --git a/runtime/oat/utils/mips/managed_register_mips.h b/compiler/utils/mips/managed_register_mips.h
similarity index 96%
rename from runtime/oat/utils/mips/managed_register_mips.h
rename to compiler/utils/mips/managed_register_mips.h
index b335ff9..dd55cc4 100644
--- a/runtime/oat/utils/mips/managed_register_mips.h
+++ b/compiler/utils/mips/managed_register_mips.h
@@ -14,11 +14,11 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
-#define ART_RUNTIME_OAT_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
+#ifndef ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
+#define ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
#include "constants_mips.h"
-#include "oat/utils/managed_register.h"
+#include "utils/managed_register.h"
namespace art {
namespace mips {
@@ -225,4 +225,4 @@
} // namespace art
-#endif // ART_RUNTIME_OAT_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
+#endif // ART_COMPILER_UTILS_MIPS_MANAGED_REGISTER_MIPS_H_
diff --git a/runtime/oat/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
similarity index 98%
rename from runtime/oat/utils/x86/assembler_x86.cc
rename to compiler/utils/x86/assembler_x86.cc
index fd8f152..9095180 100644
--- a/runtime/oat/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -17,8 +17,8 @@
#include "assembler_x86.h"
#include "base/casts.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
-#include "oat/runtime/oat_support_entrypoints.h"
#include "thread.h"
namespace art {
@@ -35,18 +35,6 @@
}
};
-static const char* kRegisterNames[] = {
- "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
-};
-std::ostream& operator<<(std::ostream& os, const Register& rhs) {
- if (rhs >= EAX && rhs <= EDI) {
- os << kRegisterNames[rhs];
- } else {
- os << "Register[" << static_cast<int>(rhs) << "]";
- }
- return os;
-}
-
std::ostream& operator<<(std::ostream& os, const XmmRegister& reg) {
return os << "XMM" << static_cast<int>(reg);
}
@@ -1849,7 +1837,7 @@
}
// Pass exception as argument in EAX
__ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset()));
- __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(pDeliverException)));
// this call should never return
__ int3();
#undef __
diff --git a/runtime/oat/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
similarity index 98%
rename from runtime/oat/utils/x86/assembler_x86.h
rename to compiler/utils/x86/assembler_x86.h
index e0fbe0e..4ba03d1 100644
--- a/runtime/oat/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -14,16 +14,16 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_UTILS_X86_ASSEMBLER_X86_H_
-#define ART_RUNTIME_OAT_UTILS_X86_ASSEMBLER_X86_H_
+#ifndef ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_
+#define ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_
#include <vector>
#include "base/macros.h"
#include "constants_x86.h"
#include "globals.h"
#include "managed_register_x86.h"
-#include "oat/utils/assembler.h"
#include "offsets.h"
+#include "utils/assembler.h"
#include "utils.h"
namespace art {
@@ -643,4 +643,4 @@
} // namespace x86
} // namespace art
-#endif // ART_RUNTIME_OAT_UTILS_X86_ASSEMBLER_X86_H_
+#endif // ART_COMPILER_UTILS_X86_ASSEMBLER_X86_H_
diff --git a/runtime/oat/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc
similarity index 100%
rename from runtime/oat/utils/x86/assembler_x86_test.cc
rename to compiler/utils/x86/assembler_x86_test.cc
diff --git a/runtime/constants_x86.h b/compiler/utils/x86/constants_x86.h
similarity index 86%
rename from runtime/constants_x86.h
rename to compiler/utils/x86/constants_x86.h
index bb18b6b..45c3834 100644
--- a/runtime/constants_x86.h
+++ b/compiler/utils/x86/constants_x86.h
@@ -14,11 +14,12 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_CONSTANTS_X86_H_
-#define ART_RUNTIME_CONSTANTS_X86_H_
+#ifndef ART_COMPILER_UTILS_X86_CONSTANTS_X86_H_
+#define ART_COMPILER_UTILS_X86_CONSTANTS_X86_H_
#include <iosfwd>
+#include "arch/x86/registers_x86.h"
#include "base/logging.h"
#include "base/macros.h"
#include "globals.h"
@@ -26,21 +27,6 @@
namespace art {
namespace x86 {
-enum Register {
- EAX = 0,
- ECX = 1,
- EDX = 2,
- EBX = 3,
- ESP = 4,
- EBP = 5,
- ESI = 6,
- EDI = 7,
- kNumberOfCpuRegisters = 8,
- kFirstByteUnsafeRegister = 4,
- kNoRegister = -1 // Signals an illegal register.
-};
-std::ostream& operator<<(std::ostream& os, const Register& rhs);
-
enum ByteRegister {
AL = 0,
CL = 1,
@@ -137,4 +123,4 @@
} // namespace x86
} // namespace art
-#endif // ART_RUNTIME_CONSTANTS_X86_H_
+#endif // ART_COMPILER_UTILS_X86_CONSTANTS_X86_H_
diff --git a/runtime/oat/utils/x86/managed_register_x86.cc b/compiler/utils/x86/managed_register_x86.cc
similarity index 100%
rename from runtime/oat/utils/x86/managed_register_x86.cc
rename to compiler/utils/x86/managed_register_x86.cc
diff --git a/runtime/oat/utils/x86/managed_register_x86.h b/compiler/utils/x86/managed_register_x86.h
similarity index 96%
rename from runtime/oat/utils/x86/managed_register_x86.h
rename to compiler/utils/x86/managed_register_x86.h
index b564a83..0201a96 100644
--- a/runtime/oat/utils/x86/managed_register_x86.h
+++ b/compiler/utils/x86/managed_register_x86.h
@@ -14,11 +14,11 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_
-#define ART_RUNTIME_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_
+#ifndef ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_
+#define ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_
#include "constants_x86.h"
-#include "oat/utils/managed_register.h"
+#include "utils/managed_register.h"
namespace art {
namespace x86 {
@@ -215,4 +215,4 @@
} // namespace art
-#endif // ART_RUNTIME_OAT_UTILS_X86_MANAGED_REGISTER_X86_H_
+#endif // ART_COMPILER_UTILS_X86_MANAGED_REGISTER_X86_H_
diff --git a/runtime/oat/utils/x86/managed_register_x86_test.cc b/compiler/utils/x86/managed_register_x86_test.cc
similarity index 100%
rename from runtime/oat/utils/x86/managed_register_x86_test.cc
rename to compiler/utils/x86/managed_register_x86_test.cc
diff --git a/runtime/vector_output_stream.cc b/compiler/vector_output_stream.cc
similarity index 100%
rename from runtime/vector_output_stream.cc
rename to compiler/vector_output_stream.cc
diff --git a/runtime/vector_output_stream.h b/compiler/vector_output_stream.h
similarity index 92%
rename from runtime/vector_output_stream.h
rename to compiler/vector_output_stream.h
index 7daa39f..a3f8226 100644
--- a/runtime/vector_output_stream.h
+++ b/compiler/vector_output_stream.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_VECTOR_OUTPUT_STREAM_H_
-#define ART_RUNTIME_VECTOR_OUTPUT_STREAM_H_
+#ifndef ART_COMPILER_VECTOR_OUTPUT_STREAM_H_
+#define ART_COMPILER_VECTOR_OUTPUT_STREAM_H_
#include "output_stream.h"
@@ -62,4 +62,4 @@
} // namespace art
-#endif // ART_RUNTIME_VECTOR_OUTPUT_STREAM_H_
+#endif // ART_COMPILER_VECTOR_OUTPUT_STREAM_H_
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index f79ddb1..c8c4347 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -230,7 +230,7 @@
bool image,
UniquePtr<CompilerDriver::DescriptorSet>& image_classes,
bool dump_stats,
- TimingLogger& timings)
+ base::TimingLogger& timings)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// SirtRef and ClassLoader creation needs to come after Runtime::Create
jobject class_loader = NULL;
@@ -263,11 +263,11 @@
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
- timings.AddSplit("dex2oat Setup");
driver->CompileAll(class_loader, dex_files, timings);
Thread::Current()->TransitionFromSuspendedToRunnable();
+ timings.NewSplit("dex2oat OatWriter");
std::string image_file_location;
uint32_t image_file_location_oat_checksum = 0;
uint32_t image_file_location_oat_data_begin = 0;
@@ -287,13 +287,11 @@
image_file_location_oat_data_begin,
image_file_location,
driver.get());
- timings.AddSplit("dex2oat OatWriter");
if (!driver->WriteElf(android_root, is_host, dex_files, oat_writer, oat_file)) {
LOG(ERROR) << "Failed to write ELF file " << oat_file->GetPath();
return NULL;
}
- timings.AddSplit("dex2oat ElfWriter");
return driver.release();
}
@@ -563,7 +561,7 @@
const unsigned int WatchDog::kWatchDogTimeoutSeconds;
static int dex2oat(int argc, char** argv) {
- TimingLogger timings("compiler", false);
+ base::TimingLogger timings("compiler", false, false);
InitLogging(argv);
@@ -928,6 +926,7 @@
}
}
+ timings.StartSplit("dex2oat Setup");
UniquePtr<const CompilerDriver> compiler(dex2oat->CreateOatFile(boot_image_option,
host_prefix.get(),
android_root,
@@ -998,13 +997,13 @@
// Elf32_Phdr.p_vaddr values by the desired base address.
//
if (image) {
+ timings.NewSplit("dex2oat ImageWriter");
Thread::Current()->TransitionFromRunnableToSuspended(kNative);
bool image_creation_success = dex2oat->CreateImageFile(image_filename,
image_base,
oat_unstripped,
oat_location,
*compiler.get());
- timings.AddSplit("dex2oat ImageWriter");
Thread::Current()->TransitionFromSuspendedToRunnable();
if (!image_creation_success) {
return EXIT_FAILURE;
@@ -1014,7 +1013,7 @@
if (is_host) {
if (dump_timings && timings.GetTotalNs() > MsToNs(1000)) {
- LOG(INFO) << Dumpable<TimingLogger>(timings);
+ LOG(INFO) << Dumpable<base::TimingLogger>(timings);
}
return EXIT_SUCCESS;
}
@@ -1022,6 +1021,7 @@
// If we don't want to strip in place, copy from unstripped location to stripped location.
// We need to strip after image creation because FixupElf needs to use .strtab.
if (oat_unstripped != oat_stripped) {
+ timings.NewSplit("dex2oat OatFile copy");
oat_file.reset();
UniquePtr<File> in(OS::OpenFile(oat_unstripped.c_str(), false));
UniquePtr<File> out(OS::OpenFile(oat_stripped.c_str(), true));
@@ -1036,23 +1036,25 @@
CHECK(write_ok);
}
oat_file.reset(out.release());
- timings.AddSplit("dex2oat OatFile copy");
LOG(INFO) << "Oat file copied successfully (stripped): " << oat_stripped;
}
#if ART_USE_PORTABLE_COMPILER // We currently only generate symbols on Portable
+ timings.NewSplit("dex2oat ElfStripper");
// Strip unneeded sections for target
off_t seek_actual = lseek(oat_file->Fd(), 0, SEEK_SET);
CHECK_EQ(0, seek_actual);
ElfStripper::Strip(oat_file.get());
- timings.AddSplit("dex2oat ElfStripper");
+
// We wrote the oat file successfully, and want to keep it.
LOG(INFO) << "Oat file written successfully (stripped): " << oat_location;
#endif // ART_USE_PORTABLE_COMPILER
+ timings.EndSplit();
+
if (dump_timings && timings.GetTotalNs() > MsToNs(1000)) {
- LOG(INFO) << Dumpable<TimingLogger>(timings);
+ LOG(INFO) << Dumpable<base::TimingLogger>(timings);
}
return EXIT_SUCCESS;
}
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 7734aa5..51bb3eb 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -111,13 +111,6 @@
native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc \
native/sun_misc_Unsafe.cc \
oat.cc \
- oat/utils/arm/assembler_arm.cc \
- oat/utils/arm/managed_register_arm.cc \
- oat/utils/assembler.cc \
- oat/utils/mips/assembler_mips.cc \
- oat/utils/mips/managed_register_mips.cc \
- oat/utils/x86/assembler_x86.cc \
- oat/utils/x86/managed_register_x86.cc \
oat_file.cc \
offsets.cc \
os_linux.cc \
@@ -125,8 +118,6 @@
reference_table.cc \
reflection.cc \
runtime.cc \
- runtime_support.cc \
- runtime_support_llvm.cc \
signal_catcher.cc \
stack.cc \
thread.cc \
@@ -136,7 +127,6 @@
trace.cc \
utf.cc \
utils.cc \
- vector_output_stream.cc \
verifier/dex_gc_map.cc \
verifier/instruction_flags.cc \
verifier/method_verifier.cc \
@@ -147,23 +137,41 @@
zip_archive.cc
LIBART_COMMON_SRC_FILES += \
- oat/runtime/context.cc \
- oat/runtime/support_alloc.cc \
- oat/runtime/support_cast.cc \
- oat/runtime/support_deoptimize.cc \
- oat/runtime/support_dexcache.cc \
- oat/runtime/support_field.cc \
- oat/runtime/support_fillarray.cc \
- oat/runtime/support_instrumentation.cc \
- oat/runtime/support_invoke.cc \
- oat/runtime/support_jni.cc \
- oat/runtime/support_locks.cc \
- oat/runtime/support_math.cc \
- oat/runtime/support_proxy.cc \
- oat/runtime/support_stubs.cc \
- oat/runtime/support_thread.cc \
- oat/runtime/support_throw.cc \
- oat/runtime/support_interpreter.cc
+ arch/context.cc \
+ arch/arm/registers_arm.cc \
+ arch/x86/registers_x86.cc \
+ arch/mips/registers_mips.cc \
+ entrypoints/entrypoint_utils.cc \
+ entrypoints/jni/jni_entrypoints.cc \
+ entrypoints/math_entrypoints.cc \
+ entrypoints/portable/portable_alloc_entrypoints.cc \
+ entrypoints/portable/portable_cast_entrypoints.cc \
+ entrypoints/portable/portable_dexcache_entrypoints.cc \
+ entrypoints/portable/portable_field_entrypoints.cc \
+ entrypoints/portable/portable_fillarray_entrypoints.cc \
+ entrypoints/portable/portable_invoke_entrypoints.cc \
+ entrypoints/portable/portable_jni_entrypoints.cc \
+ entrypoints/portable/portable_lock_entrypoints.cc \
+ entrypoints/portable/portable_proxy_entrypoints.cc \
+ entrypoints/portable/portable_stub_entrypoints.cc \
+ entrypoints/portable/portable_thread_entrypoints.cc \
+ entrypoints/portable/portable_throw_entrypoints.cc \
+ entrypoints/quick/quick_alloc_entrypoints.cc \
+ entrypoints/quick/quick_cast_entrypoints.cc \
+ entrypoints/quick/quick_deoptimization_entrypoints.cc \
+ entrypoints/quick/quick_dexcache_entrypoints.cc \
+ entrypoints/quick/quick_field_entrypoints.cc \
+ entrypoints/quick/quick_fillarray_entrypoints.cc \
+ entrypoints/quick/quick_instrumentation_entrypoints.cc \
+ entrypoints/quick/quick_interpreter_entrypoints.cc \
+ entrypoints/quick/quick_invoke_entrypoints.cc \
+ entrypoints/quick/quick_jni_entrypoints.cc \
+ entrypoints/quick/quick_lock_entrypoints.cc \
+ entrypoints/quick/quick_math_entrypoints.cc \
+ entrypoints/quick/quick_proxy_entrypoints.cc \
+ entrypoints/quick/quick_stub_entrypoints.cc \
+ entrypoints/quick/quick_thread_entrypoints.cc \
+ entrypoints/quick/quick_throw_entrypoints.cc
LIBART_TARGET_SRC_FILES := \
$(LIBART_COMMON_SRC_FILES) \
@@ -175,40 +183,36 @@
ifeq ($(TARGET_ARCH),arm)
LIBART_TARGET_SRC_FILES += \
- oat/runtime/arm/context_arm.cc.arm \
- oat/runtime/arm/oat_support_entrypoints_arm.cc \
- oat/runtime/arm/runtime_support_arm.S
+ arch/arm/context_arm.cc.arm \
+ arch/arm/entrypoints_init_arm.cc \
+ arch/arm/jni_entrypoints_arm.S \
+ arch/arm/portable_entrypoints_arm.S \
+ arch/arm/quick_entrypoints_arm.S \
+ arch/arm/thread_arm.cc
else # TARGET_ARCH != arm
ifeq ($(TARGET_ARCH),x86)
LIBART_TARGET_SRC_FILES += \
- oat/runtime/x86/context_x86.cc \
- oat/runtime/x86/oat_support_entrypoints_x86.cc \
- oat/runtime/x86/runtime_support_x86.S
+ arch/x86/context_x86.cc \
+ arch/x86/entrypoints_init_x86.cc \
+ arch/x86/jni_entrypoints_x86.S \
+ arch/x86/portable_entrypoints_x86.S \
+ arch/x86/quick_entrypoints_x86.S \
+ arch/x86/thread_x86.cc
else # TARGET_ARCH != x86
ifeq ($(TARGET_ARCH),mips)
LIBART_TARGET_SRC_FILES += \
- oat/runtime/mips/context_mips.cc \
- oat/runtime/mips/oat_support_entrypoints_mips.cc \
- oat/runtime/mips/runtime_support_mips.S
+ arch/mips/context_mips.cc \
+ arch/mips/entrypoints_init_mips.cc \
+ arch/mips/jni_entrypoints_mips.S \
+ arch/mips/portable_entrypoints_mips.S \
+ arch/mips/quick_entrypoints_mips.S \
+ arch/mips/thread_mips.cc
else # TARGET_ARCH != mips
$(error unsupported TARGET_ARCH=$(TARGET_ARCH))
endif # TARGET_ARCH != mips
endif # TARGET_ARCH != x86
endif # TARGET_ARCH != arm
-ifeq ($(TARGET_ARCH),arm)
-LIBART_TARGET_SRC_FILES += thread_arm.cc
-else # TARGET_ARCH != arm
-ifeq ($(TARGET_ARCH),x86)
-LIBART_TARGET_SRC_FILES += thread_x86.cc
-else # TARGET_ARCH != x86
-ifeq ($(TARGET_ARCH),mips)
-LIBART_TARGET_SRC_FILES += thread_mips.cc
-else # TARGET_ARCH != mips
-$(error unsupported TARGET_ARCH=$(TARGET_ARCH))
-endif # TARGET_ARCH != mips
-endif # TARGET_ARCH != x86
-endif # TARGET_ARCH != arm
LIBART_HOST_SRC_FILES := \
$(LIBART_COMMON_SRC_FILES) \
@@ -219,15 +223,12 @@
ifeq ($(HOST_ARCH),x86)
LIBART_HOST_SRC_FILES += \
- oat/runtime/x86/context_x86.cc \
- oat/runtime/x86/oat_support_entrypoints_x86.cc \
- oat/runtime/x86/runtime_support_x86.S
-else # HOST_ARCH != x86
-$(error unsupported HOST_ARCH=$(HOST_ARCH))
-endif # HOST_ARCH != x86
-
-ifeq ($(HOST_ARCH),x86)
-LIBART_HOST_SRC_FILES += thread_x86.cc
+ arch/x86/context_x86.cc \
+ arch/x86/entrypoints_init_x86.cc \
+ arch/x86/jni_entrypoints_x86.S \
+ arch/x86/portable_entrypoints_x86.S \
+ arch/x86/quick_entrypoints_x86.S \
+ arch/x86/thread_x86.cc
else # HOST_ARCH != x86
$(error unsupported HOST_ARCH=$(HOST_ARCH))
endif # HOST_ARCH != x86
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
new file mode 100644
index 0000000..ed655e9
--- /dev/null
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
+#define ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
+
+#include "asm_support_arm.h"
+
+.macro ENTRY name
+ .type \name, #function
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+ .fnstart
+.endm
+
+.macro END name
+ .fnend
+ .cfi_endproc
+ .size \name, .-\name
+.endm
+
+#endif  // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_S_
diff --git a/runtime/arch/arm/asm_support_arm.h b/runtime/arch/arm/asm_support_arm.h
new file mode 100644
index 0000000..ed3d476
--- /dev/null
+++ b/runtime/arch/arm/asm_support_arm.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
+
+#include "asm_support.h"
+
+// Register holding suspend check count down.
+#define rSUSPEND r4
+// Register holding Thread::Current().
+#define rSELF r9
+// Offset of field Thread::suspend_count_ verified in InitCpu
+#define THREAD_FLAGS_OFFSET 0
+// Offset of field Thread::exception_ verified in InitCpu
+#define THREAD_EXCEPTION_OFFSET 12
+
+#endif // ART_RUNTIME_ARCH_ARM_ASM_SUPPORT_ARM_H_
diff --git a/runtime/oat/runtime/arm/context_arm.cc b/runtime/arch/arm/context_arm.cc
similarity index 100%
rename from runtime/oat/runtime/arm/context_arm.cc
rename to runtime/arch/arm/context_arm.cc
diff --git a/runtime/oat/runtime/arm/context_arm.h b/runtime/arch/arm/context_arm.h
similarity index 84%
rename from runtime/oat/runtime/arm/context_arm.h
rename to runtime/arch/arm/context_arm.h
index 0be85e3..00651ff 100644
--- a/runtime/oat/runtime/arm/context_arm.h
+++ b/runtime/arch/arm/context_arm.h
@@ -14,12 +14,13 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
-#define ART_RUNTIME_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
+#ifndef ART_RUNTIME_ARCH_ARM_CONTEXT_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_CONTEXT_ARM_H_
#include "locks.h"
-#include "constants_arm.h"
-#include "oat/runtime/context.h"
+#include "arch/context.h"
+#include "base/logging.h"
+#include "registers_arm.h"
namespace art {
namespace arm {
@@ -45,7 +46,7 @@
}
virtual uintptr_t GetGPR(uint32_t reg) {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
+ DCHECK_LT(reg, static_cast<uint32_t>(kNumberOfCoreRegisters));
return *gprs_[reg];
}
@@ -64,4 +65,4 @@
} // namespace arm
} // namespace art
-#endif // ART_RUNTIME_OAT_RUNTIME_ARM_CONTEXT_ARM_H_
+#endif // ART_RUNTIME_ARCH_ARM_CONTEXT_ARM_H_
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
new file mode 100644
index 0000000..b71a158
--- /dev/null
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/math_entrypoints.h"
+
+namespace art {
+
+// Alloc entrypoints.
+extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
+ const mirror::Class* ref_class);
+extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
+extern "C" void art_quick_check_cast_from_code(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
+extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+
+// Exception entrypoints.
+extern "C" void* GetAndClearException(Thread*);
+
+// Field entrypoints.
+extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
+extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
+extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object_from_code(void*);
+extern "C" void art_quick_unlock_object_from_code(void*);
+
+// Math entrypoints.
+extern int32_t CmpgDouble(double a, double b);
+extern int32_t CmplDouble(double a, double b);
+extern int32_t CmpgFloat(float a, float b);
+extern int32_t CmplFloat(float a, float b);
+
+// Math conversions.
+extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT
+extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT
+extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT
+extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE
+
+// Single-precision FP arithmetics.
+extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
+
+// Double-precision FP arithmetics.
+extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
+
+// Integer arithmetics.
+extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16]
+
+// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
+extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t);
+extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
+extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
+
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
+// Intrinsic entrypoints.
+extern "C" int32_t __memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_string_compareto(void*, void*);
+
+// Invoke entrypoints.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_quick_test_suspend();
+
+// Throw entrypoints.
+extern "C" void art_quick_deliver_exception_from_code(void*);
+extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero_from_code();
+extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception_from_code();
+extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+
+void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
+ // Alloc
+ qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
+ qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
+ qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
+ qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
+ qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
+ qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+
+ // Cast
+ qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
+ qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
+ qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
+
+ // DexCache
+ qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
+ qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
+ qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
+ qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
+
+ // Field
+ qpoints->pSet32Instance = art_quick_set32_instance_from_code;
+ qpoints->pSet32Static = art_quick_set32_static_from_code;
+ qpoints->pSet64Instance = art_quick_set64_instance_from_code;
+ qpoints->pSet64Static = art_quick_set64_static_from_code;
+ qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
+ qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
+ qpoints->pGet32Instance = art_quick_get32_instance_from_code;
+ qpoints->pGet64Instance = art_quick_get64_instance_from_code;
+ qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
+ qpoints->pGet32Static = art_quick_get32_static_from_code;
+ qpoints->pGet64Static = art_quick_get64_static_from_code;
+ qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
+
+ // FillArray
+ qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+
+ // JNI
+ qpoints->pJniMethodStart = JniMethodStart;
+ qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ qpoints->pJniMethodEnd = JniMethodEnd;
+ qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+
+ // Locks
+ qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
+ qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+
+ // Math
+ qpoints->pCmpgDouble = CmpgDouble;
+ qpoints->pCmpgFloat = CmpgFloat;
+ qpoints->pCmplDouble = CmplDouble;
+ qpoints->pCmplFloat = CmplFloat;
+ qpoints->pFmod = fmod;
+ qpoints->pSqrt = sqrt;
+ qpoints->pL2d = __aeabi_l2d;
+ qpoints->pFmodf = fmodf;
+ qpoints->pL2f = __aeabi_l2f;
+ qpoints->pD2iz = __aeabi_d2iz;
+ qpoints->pF2iz = __aeabi_f2iz;
+ qpoints->pIdivmod = __aeabi_idivmod;
+ qpoints->pD2l = art_d2l;
+ qpoints->pF2l = art_f2l;
+ qpoints->pLdiv = __aeabi_ldivmod;
+ qpoints->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3
+ qpoints->pLmul = art_quick_mul_long;
+ qpoints->pShlLong = art_quick_shl_long;
+ qpoints->pShrLong = art_quick_shr_long;
+ qpoints->pUshrLong = art_quick_ushr_long;
+
+ // Interpreter
+ qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+
+ // Intrinsics
+ qpoints->pIndexOf = art_quick_indexof;
+ qpoints->pMemcmp16 = __memcmp16;
+ qpoints->pStringCompareTo = art_quick_string_compareto;
+ qpoints->pMemcpy = memcpy;
+
+ // Invocation
+ qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+
+ // Thread
+ qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
+ qpoints->pTestSuspendFromCode = art_quick_test_suspend;
+
+ // Throws
+ qpoints->pDeliverException = art_quick_deliver_exception_from_code;
+ qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
+ qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
+ qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
+ qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
+ qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+
+ // Portable
+ ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+};
+
+} // namespace art
diff --git a/runtime/arch/arm/jni_entrypoints_arm.S b/runtime/arch/arm/jni_entrypoints_arm.S
new file mode 100644
index 0000000..0a0d06a
--- /dev/null
+++ b/runtime/arch/arm/jni_entrypoints_arm.S
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_arm.S"
+
+ /*
+ * Jni dlsym lookup stub.
+ */
+ .extern artFindNativeMethod
+ENTRY art_jni_dlsym_lookup_stub
+ push {r0, r1, r2, r3, lr} @ spill regs
+ .save {r0, r1, r2, r3, lr}
+ .pad #20
+ .cfi_adjust_cfa_offset 20
+ sub sp, #12 @ pad stack pointer to align frame
+ .pad #12
+ .cfi_adjust_cfa_offset 12
+ mov r0, r9 @ pass Thread::Current
+ blx artFindNativeMethod @ (Thread*)
+ mov r12, r0 @ save result in r12
+ add sp, #12 @ restore stack pointer
+ .cfi_adjust_cfa_offset -12
+ pop {r0, r1, r2, r3, lr} @ restore regs
+ .cfi_adjust_cfa_offset -20
+ cmp r12, #0 @ is method code null?
+ bxne r12 @ if non-null, tail call to method's code
+ bx lr @ otherwise, return to caller to handle exception
+END art_jni_dlsym_lookup_stub
+
+ /*
+ * Entry point of native methods when JNI bug compatibility is enabled.
+ */
+ .extern artWorkAroundAppJniBugs
+ENTRY art_quick_work_around_app_jni_bugs
+ @ save registers that may contain arguments and LR that will be crushed by a call
+ push {r0-r3, lr}
+ .save {r0-r3, lr}
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r1, 4
+ .cfi_rel_offset r2, 8
+ .cfi_rel_offset r3, 12
+ sub sp, #12 @ 3 words of space for alignment
+ mov r0, r9 @ pass Thread::Current
+ mov r1, sp @ pass SP
+ bl artWorkAroundAppJniBugs @ (Thread*, SP)
+ add sp, #12 @ rewind stack
+ mov r12, r0 @ save target address
+ pop {r0-r3, lr} @ restore possibly modified argument registers
+ .cfi_adjust_cfa_offset -16
+ bx r12 @ tail call into JNI routine
+END art_quick_work_around_app_jni_bugs
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
new file mode 100644
index 0000000..4cc6654
--- /dev/null
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_arm.S"
+
+ /*
+ * Portable invocation stub.
+ * On entry:
+ * r0 = method pointer
+ * r1 = argument array or NULL for no argument methods
+ * r2 = size of argument array in bytes
+ * r3 = (managed) thread pointer
+ * [sp] = JValue* result
+ * [sp + 4] = result type char
+ */
+ENTRY art_portable_invoke_stub
+ push {r0, r4, r5, r9, r11, lr} @ spill regs
+ .save {r0, r4, r5, r9, r11, lr}
+ .pad #24
+ .cfi_adjust_cfa_offset 24
+ .cfi_rel_offset r0, 0
+ .cfi_rel_offset r4, 4
+ .cfi_rel_offset r5, 8
+ .cfi_rel_offset r9, 12
+ .cfi_rel_offset r11, 16
+ .cfi_rel_offset lr, 20
+ mov r11, sp @ save the stack pointer
+ .cfi_def_cfa_register r11
+ mov r9, r3 @ move managed thread pointer into r9
+ mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
+ add r5, r2, #16 @ create space for method pointer in frame
+ and r5, #0xFFFFFFF0 @ align frame size to 16 bytes
+ sub sp, r5 @ reserve stack space for argument array
+ add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
+ bl memcpy @ memcpy (dest, src, bytes)
+ ldr r0, [r11] @ restore method*
+ ldr r1, [sp, #4] @ copy arg value for r1
+ ldr r2, [sp, #8] @ copy arg value for r2
+ ldr r3, [sp, #12] @ copy arg value for r3
+ mov ip, #0 @ set ip to 0
+ str ip, [sp] @ store NULL for method* at bottom of frame
+ add sp, #16 @ first 4 args are not passed on stack for portable
+ ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code
+ blx ip @ call the method
+ mov sp, r11 @ restore the stack pointer
+ ldr ip, [sp, #24] @ load the result pointer
+ strd r0, [ip] @ store r0/r1 into result pointer
+ pop {r0, r4, r5, r9, r11, lr} @ restore spill regs
+ .cfi_adjust_cfa_offset -24
+ bx lr
+END art_portable_invoke_stub
+
+ .extern artPortableProxyInvokeHandler
+ENTRY art_portable_proxy_invoke_handler
+ @ Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
+ @ TODO: just save the registers that are needed in artPortableProxyInvokeHandler.
+ push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
+ .save {r1-r3, r5-r8, r10-r11, lr}
+ .cfi_adjust_cfa_offset 40
+ .cfi_rel_offset r1, 0
+ .cfi_rel_offset r2, 4
+ .cfi_rel_offset r3, 8
+ .cfi_rel_offset r5, 12
+ .cfi_rel_offset r6, 16
+ .cfi_rel_offset r7, 20
+ .cfi_rel_offset r8, 24
+ .cfi_rel_offset r10, 28
+ .cfi_rel_offset r11, 32
+ .cfi_rel_offset lr, 36
+ sub sp, #8 @ 2 words of space, bottom word will hold Method*
+ .pad #8
+ .cfi_adjust_cfa_offset 8
+ @ Begin argument set up.
+ str r0, [sp, #0] @ place proxy method at bottom of frame
+ mov r2, r9 @ pass Thread::Current
+ mov r3, sp @ pass SP
+ blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
+ ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
+ ldr lr, [sp, #44] @ restore lr
+ add sp, #48 @ pop frame
+ .cfi_adjust_cfa_offset -48
+ bx lr @ return
+END art_portable_proxy_invoke_handler
diff --git a/runtime/oat/runtime/arm/runtime_support_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
similarity index 90%
rename from runtime/oat/runtime/arm/runtime_support_arm.S
rename to runtime/arch/arm/quick_entrypoints_arm.S
index f19e8ba..9b8d238 100644
--- a/runtime/oat/runtime/arm/runtime_support_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -14,29 +14,13 @@
* limitations under the License.
*/
-#include "asm_support.h"
+#include "asm_support_arm.S"
/* Deliver the given exception */
.extern artDeliverExceptionFromCode
/* Deliver an exception pending on a thread */
.extern artDeliverPendingException
-.macro ENTRY name
- .type \name, #function
- .global \name
- /* Cache alignment for function entry */
- .balign 16
-\name:
- .cfi_startproc
- .fnstart
-.endm
-
-.macro END name
- .fnend
- .cfi_endproc
- .size \name, .-\name
-.endm
-
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
@@ -247,53 +231,6 @@
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
- * Portable invocation stub.
- * On entry:
- * r0 = method pointer
- * r1 = argument array or NULL for no argument methods
- * r2 = size of argument array in bytes
- * r3 = (managed) thread pointer
- * [sp] = JValue* result
- * [sp + 4] = result type char
- */
-ENTRY art_portable_invoke_stub
- push {r0, r4, r5, r9, r11, lr} @ spill regs
- .save {r0, r4, r5, r9, r11, lr}
- .pad #24
- .cfi_adjust_cfa_offset 24
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r4, 4
- .cfi_rel_offset r5, 8
- .cfi_rel_offset r9, 12
- .cfi_rel_offset r11, 16
- .cfi_rel_offset lr, 20
- mov r11, sp @ save the stack pointer
- .cfi_def_cfa_register r11
- mov r9, r3 @ move managed thread pointer into r9
- mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
- add r5, r2, #16 @ create space for method pointer in frame
- and r5, #0xFFFFFFF0 @ align frame size to 16 bytes
- sub sp, r5 @ reserve stack space for argument array
- add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
- bl memcpy @ memcpy (dest, src, bytes)
- ldr r0, [r11] @ restore method*
- ldr r1, [sp, #4] @ copy arg value for r1
- ldr r2, [sp, #8] @ copy arg value for r2
- ldr r3, [sp, #12] @ copy arg value for r3
- mov ip, #0 @ set ip to 0
- str ip, [sp] @ store NULL for method* at bottom of frame
- add sp, #16 @ first 4 args are not passed on stack for portable
- ldr ip, [r0, #METHOD_CODE_OFFSET] @ get pointer to the code
- blx ip @ call the method
- mov sp, r11 @ restore the stack pointer
- ldr ip, [sp, #24] @ load the result pointer
- strd r0, [ip] @ store r0/r1 into result pointer
- pop {r0, r4, r5, r9, r11, lr} @ restore spill regs
- .cfi_adjust_cfa_offset -24
- bx lr
-END art_portable_invoke_stub
-
- /*
* Quick invocation stub.
* On entry:
* r0 = method pointer
@@ -353,30 +290,6 @@
END art_quick_do_long_jump
/*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_quick_work_around_app_jni_bugs
- @ save registers that may contain arguments and LR that will be crushed by a call
- push {r0-r3, lr}
- .save {r0-r3, lr}
- .cfi_adjust_cfa_offset 16
- .cfi_rel_offset r0, 0
- .cfi_rel_offset r1, 4
- .cfi_rel_offset r2, 8
- .cfi_rel_offset r3, 12
- sub sp, #12 @ 3 words of space for alignment
- mov r0, r9 @ pass Thread::Current
- mov r1, sp @ pass SP
- bl artWorkAroundAppJniBugs @ (Thread*, SP)
- add sp, #12 @ rewind stack
- mov r12, r0 @ save target address
- pop {r0-r3, lr} @ restore possibly modified argument registers
- .cfi_adjust_cfa_offset -16
- bx r12 @ tail call into JNI routine
-END art_quick_work_around_app_jni_bugs
-
- /*
* Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
* failure.
*/
@@ -906,20 +819,6 @@
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
- .extern artPortableProxyInvokeHandler
-ENTRY art_portable_proxy_invoke_handler
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- str r0, [sp, #0] @ place proxy method at bottom of frame
- mov r2, r9 @ pass Thread::Current
- mov r3, sp @ pass SP
- blx artPortableProxyInvokeHandler @ (Method* proxy method, receiver, Thread*, SP)
- ldr r12, [r9, #THREAD_EXCEPTION_OFFSET] @ load Thread::Current()->exception_
- ldr lr, [sp, #44] @ restore lr
- add sp, #48 @ pop frame
- .cfi_adjust_cfa_offset -48
- bx lr @ return
-END art_portable_proxy_invoke_handler
-
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
* r0 holds the proxy method and r1 holds the receiver; r2 and r3 may contain arguments. The
@@ -1045,30 +944,6 @@
END art_quick_abstract_method_error_stub
/*
- * Jni dlsym lookup stub.
- */
- .extern artFindNativeMethod
-ENTRY art_jni_dlsym_lookup_stub
- push {r0, r1, r2, r3, lr} @ spill regs
- .save {r0, r1, r2, r3, lr}
- .pad #20
- .cfi_adjust_cfa_offset 20
- sub sp, #12 @ pad stack pointer to align frame
- .pad #12
- .cfi_adjust_cfa_offset 12
- mov r0, r9 @ pass Thread::Current
- blx artFindNativeMethod @ (Thread*)
- mov r12, r0 @ save result in r12
- add sp, #12 @ restore stack pointer
- .cfi_adjust_cfa_offset -12
- pop {r0, r1, r2, r3, lr} @ restore regs
- .cfi_adjust_cfa_offset -20
- cmp r12, #0 @ is method code null?
- bxne r12 @ if non-null, tail call to method's code
- bx lr @ otherwise, return to caller to handle exception
-END art_jni_dlsym_lookup_stub
-
- /*
* Signed 64-bit integer multiply.
*
* Consider WXxYZ (r1r0 x r3r2) with a long multiply:
diff --git a/runtime/arch/arm/registers_arm.cc b/runtime/arch/arm/registers_arm.cc
new file mode 100644
index 0000000..4f04647
--- /dev/null
+++ b/runtime/arch/arm/registers_arm.cc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "registers_arm.h"
+
+#include <ostream>
+
+namespace art {
+namespace arm {
+
+static const char* kRegisterNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "fp", "ip", "sp", "lr", "pc"
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+ if (rhs >= R0 && rhs <= PC) {
+ os << kRegisterNames[rhs];
+ } else {
+ os << "Register[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
+ if (rhs >= S0 && rhs < kNumberOfSRegisters) {
+ os << "s" << static_cast<int>(rhs);
+ } else {
+ os << "SRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+} // namespace arm
+} // namespace art
diff --git a/runtime/arch/arm/registers_arm.h b/runtime/arch/arm/registers_arm.h
new file mode 100644
index 0000000..932095d
--- /dev/null
+++ b/runtime/arch/arm/registers_arm.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_ARM_REGISTERS_ARM_H_
+#define ART_RUNTIME_ARCH_ARM_REGISTERS_ARM_H_
+
+#include <iosfwd>
+
+namespace art {
+namespace arm {
+
+// Values for registers.
+enum Register {
+ R0 = 0,
+ R1 = 1,
+ R2 = 2,
+ R3 = 3,
+ R4 = 4,
+ R5 = 5,
+ R6 = 6,
+ R7 = 7,
+ R8 = 8,
+ R9 = 9,
+ R10 = 10,
+ R11 = 11,
+ R12 = 12,
+ R13 = 13,
+ R14 = 14,
+ R15 = 15,
+ TR = 9, // thread register
+ FP = 11,
+ IP = 12,
+ SP = 13,
+ LR = 14,
+ PC = 15,
+ kNumberOfCoreRegisters = 16,
+ kNoRegister = -1,
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs);
+
+
+// Values for single-precision floating point registers.
+enum SRegister {
+ S0 = 0,
+ S1 = 1,
+ S2 = 2,
+ S3 = 3,
+ S4 = 4,
+ S5 = 5,
+ S6 = 6,
+ S7 = 7,
+ S8 = 8,
+ S9 = 9,
+ S10 = 10,
+ S11 = 11,
+ S12 = 12,
+ S13 = 13,
+ S14 = 14,
+ S15 = 15,
+ S16 = 16,
+ S17 = 17,
+ S18 = 18,
+ S19 = 19,
+ S20 = 20,
+ S21 = 21,
+ S22 = 22,
+ S23 = 23,
+ S24 = 24,
+ S25 = 25,
+ S26 = 26,
+ S27 = 27,
+ S28 = 28,
+ S29 = 29,
+ S30 = 30,
+ S31 = 31,
+ kNumberOfSRegisters = 32,
+ kNoSRegister = -1,
+};
+std::ostream& operator<<(std::ostream& os, const SRegister& rhs);
+
+} // namespace arm
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_ARM_REGISTERS_ARM_H_
diff --git a/runtime/thread_arm.cc b/runtime/arch/arm/thread_arm.cc
similarity index 93%
copy from runtime/thread_arm.cc
copy to runtime/arch/arm/thread_arm.cc
index 0ef26bf..ea908be 100644
--- a/runtime/thread_arm.cc
+++ b/runtime/arch/arm/thread_arm.cc
@@ -16,8 +16,8 @@
#include "thread.h"
-#include "asm_support.h"
-#include "base/macros.h"
+#include "asm_support_arm.h"
+#include "base/logging.h"
namespace art {
diff --git a/runtime/oat/runtime/context.cc b/runtime/arch/context.cc
similarity index 100%
rename from runtime/oat/runtime/context.cc
rename to runtime/arch/context.cc
diff --git a/runtime/oat/runtime/context.h b/runtime/arch/context.h
similarity index 93%
rename from runtime/oat/runtime/context.h
rename to runtime/arch/context.h
index ac43e9a..91e0cd6 100644
--- a/runtime/oat/runtime/context.h
+++ b/runtime/arch/context.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_RUNTIME_CONTEXT_H_
-#define ART_RUNTIME_OAT_RUNTIME_CONTEXT_H_
+#ifndef ART_RUNTIME_ARCH_CONTEXT_H_
+#define ART_RUNTIME_ARCH_CONTEXT_H_
#include <stddef.h>
#include <stdint.h>
@@ -67,4 +67,4 @@
} // namespace art
-#endif // ART_RUNTIME_OAT_RUNTIME_CONTEXT_H_
+#endif // ART_RUNTIME_ARCH_CONTEXT_H_
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
new file mode 100644
index 0000000..8a34b9d
--- /dev/null
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
+#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
+
+#include "asm_support_mips.h"
+
+ /* Cache alignment for function entry */
+.macro ENTRY name
+ .type \name, %function
+ .global \name
+ .balign 16
+\name:
+ .cfi_startproc
+.endm
+
+.macro END name
+ .cfi_endproc
+ .size \name, .-\name
+.endm
+
+ /* Generates $gp for function calls */
+.macro GENERATE_GLOBAL_POINTER
+ .cpload $t9
+.endm
+
+#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
new file mode 100644
index 0000000..9a66352
--- /dev/null
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
+
+#include "asm_support.h"
+
+// Register holding suspend check count down.
+#define rSUSPEND $s0
+// Register holding Thread::Current().
+#define rSELF $s1
+// Offset of field Thread::suspend_count_ verified in InitCpu
+#define THREAD_FLAGS_OFFSET 0
+// Offset of field Thread::exception_ verified in InitCpu
+#define THREAD_EXCEPTION_OFFSET 12
+
+#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_H_
diff --git a/runtime/oat/runtime/mips/context_mips.cc b/runtime/arch/mips/context_mips.cc
similarity index 100%
rename from runtime/oat/runtime/mips/context_mips.cc
rename to runtime/arch/mips/context_mips.cc
diff --git a/runtime/oat/runtime/mips/context_mips.h b/runtime/arch/mips/context_mips.h
similarity index 87%
rename from runtime/oat/runtime/mips/context_mips.h
rename to runtime/arch/mips/context_mips.h
index f27124c..5595f86 100644
--- a/runtime/oat/runtime/mips/context_mips.h
+++ b/runtime/arch/mips/context_mips.h
@@ -14,11 +14,12 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_RUNTIME_MIPS_CONTEXT_MIPS_H_
-#define ART_RUNTIME_OAT_RUNTIME_MIPS_CONTEXT_MIPS_H_
+#ifndef ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_
-#include "constants_mips.h"
-#include "oat/runtime/context.h"
+#include "arch/context.h"
+#include "base/logging.h"
+#include "registers_mips.h"
namespace art {
namespace mips {
@@ -61,4 +62,4 @@
} // namespace mips
} // namespace art
-#endif // ART_RUNTIME_OAT_RUNTIME_MIPS_CONTEXT_MIPS_H_
+#endif // ART_RUNTIME_ARCH_MIPS_CONTEXT_MIPS_H_
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
new file mode 100644
index 0000000..0a62a40
--- /dev/null
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "entrypoints/math_entrypoints.h"
+
+namespace art {
+
+// Alloc entrypoints.
+extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
+ const mirror::Class* ref_class);
+extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
+extern "C" void art_quick_check_cast_from_code(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
+extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+
+// Exception entrypoints.
+extern "C" void* GetAndClearException(Thread*);
+
+// Field entrypoints.
+extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
+extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
+extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object_from_code(void*);
+extern "C" void art_quick_unlock_object_from_code(void*);
+
+// Math entrypoints.
+extern int32_t CmpgDouble(double a, double b);
+extern int32_t CmplDouble(double a, double b);
+extern int32_t CmpgFloat(float a, float b);
+extern int32_t CmplFloat(float a, float b);
+extern "C" int64_t artLmulFromCode(int64_t a, int64_t b);
+extern "C" int64_t artLdivFromCode(int64_t a, int64_t b);
+extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b);
+
+// Math conversions.
+extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT
+extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT
+extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT
+extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE
+extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG
+extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG
+
+// Single-precision FP arithmetics.
+extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
+
+// Double-precision FP arithmetics.
+extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
+
+// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
+extern "C" int64_t __divdi3(int64_t, int64_t);
+extern "C" int64_t __moddi3(int64_t, int64_t);
+extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
+
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
+// Intrinsic entrypoints.
+extern "C" int32_t __memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_string_compareto(void*, void*);
+
+// Invoke entrypoints.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_quick_test_suspend();
+
+// Throw entrypoints.
+extern "C" void art_quick_deliver_exception_from_code(void*);
+extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero_from_code();
+extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception_from_code();
+extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+
+void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
+ // Alloc
+ qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
+ qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
+ qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
+ qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
+ qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
+ qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+
+ // Cast
+ qpoints->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
+ qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
+ qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
+
+ // DexCache
+ qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
+ qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
+ qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
+ qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
+
+ // Field
+ qpoints->pSet32Instance = art_quick_set32_instance_from_code;
+ qpoints->pSet32Static = art_quick_set32_static_from_code;
+ qpoints->pSet64Instance = art_quick_set64_instance_from_code;
+ qpoints->pSet64Static = art_quick_set64_static_from_code;
+ qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
+ qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
+ qpoints->pGet32Instance = art_quick_get32_instance_from_code;
+ qpoints->pGet64Instance = art_quick_get64_instance_from_code;
+ qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
+ qpoints->pGet32Static = art_quick_get32_static_from_code;
+ qpoints->pGet64Static = art_quick_get64_static_from_code;
+ qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
+
+ // FillArray
+ qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+
+ // JNI
+ qpoints->pJniMethodStart = JniMethodStart;
+ qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ qpoints->pJniMethodEnd = JniMethodEnd;
+ qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+
+ // Locks
+ qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
+ qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+
+ // Math
+ qpoints->pCmpgDouble = CmpgDouble;
+ qpoints->pCmpgFloat = CmpgFloat;
+ qpoints->pCmplDouble = CmplDouble;
+ qpoints->pCmplFloat = CmplFloat;
+ qpoints->pFmod = fmod;
+ qpoints->pL2d = __floatdidf;
+ qpoints->pFmodf = fmodf;
+ qpoints->pL2f = __floatdisf;
+ qpoints->pD2iz = __fixdfsi;
+ qpoints->pF2iz = __fixsfsi;
+ qpoints->pIdivmod = NULL;
+ qpoints->pD2l = art_d2l;
+ qpoints->pF2l = art_f2l;
+ qpoints->pLdiv = artLdivFromCode;
+ qpoints->pLdivmod = artLdivmodFromCode;
+ qpoints->pLmul = artLmulFromCode;
+ qpoints->pShlLong = art_quick_shl_long;
+ qpoints->pShrLong = art_quick_shr_long;
+ qpoints->pUshrLong = art_quick_ushr_long;
+
+ // Interpreter
+ qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+
+ // Intrinsics
+ qpoints->pIndexOf = art_quick_indexof;
+ qpoints->pMemcmp16 = __memcmp16;
+ qpoints->pStringCompareTo = art_quick_string_compareto;
+ qpoints->pMemcpy = memcpy;
+
+ // Invocation
+ qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+
+ // Thread
+ qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
+ qpoints->pTestSuspendFromCode = art_quick_test_suspend;
+
+ // Throws
+ qpoints->pDeliverException = art_quick_deliver_exception_from_code;
+ qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
+ qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
+ qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
+ qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
+ qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+
+ // Portable
+ ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+}
+
+} // namespace art
diff --git a/runtime/arch/mips/jni_entrypoints_mips.S b/runtime/arch/mips/jni_entrypoints_mips.S
new file mode 100644
index 0000000..fca6d77
--- /dev/null
+++ b/runtime/arch/mips/jni_entrypoints_mips.S
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_mips.S"
+
+ .set noreorder
+ .balign 4
+
+ /*
+ * Jni dlsym lookup stub.
+ */
+ .extern artFindNativeMethod
+ENTRY art_jni_dlsym_lookup_stub
+ GENERATE_GLOBAL_POINTER
+ addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra
+ .cfi_adjust_cfa_offset 32
+ sw $ra, 16($sp)
+ .cfi_rel_offset 31, 16
+ sw $a3, 12($sp)
+ .cfi_rel_offset 7, 12
+ sw $a2, 8($sp)
+ .cfi_rel_offset 6, 8
+ sw $a1, 4($sp)
+ .cfi_rel_offset 5, 4
+ sw $a0, 0($sp)
+ .cfi_rel_offset 4, 0
+ jal artFindNativeMethod # (Thread*)
+ move $a0, $s1 # pass Thread::Current()
+ lw $a0, 0($sp) # restore registers from stack
+ lw $a1, 4($sp)
+ lw $a2, 8($sp)
+ lw $a3, 12($sp)
+ lw $ra, 16($sp)
+ beq $v0, $zero, no_native_code_found
+ addiu $sp, $sp, 32 # restore the stack
+ .cfi_adjust_cfa_offset -32
+ move $t9, $v0 # put method code result in $t9
+ jr $t9 # leaf call to method's code
+ nop
+no_native_code_found:
+ jr $ra
+ nop
+END art_jni_dlsym_lookup_stub
+
+ /*
+ * Entry point of native methods when JNI bug compatibility is enabled.
+ */
+ .extern artWorkAroundAppJniBugs
+ENTRY art_quick_work_around_app_jni_bugs
+ GENERATE_GLOBAL_POINTER
+ # save registers that may contain arguments and LR that will be crushed by a call
+ addiu $sp, $sp, -32
+ .cfi_adjust_cfa_offset 32
+ sw $ra, 28($sp)
+ .cfi_rel_offset 31, 28
+    sw     $a3, 24($sp)
+    .cfi_rel_offset 7, 24
+    sw     $a2, 20($sp)
+    .cfi_rel_offset 6, 20
+    sw     $a1, 16($sp)
+    .cfi_rel_offset 5, 16
+    sw     $a0, 12($sp)
+    .cfi_rel_offset 4, 12
+ move $a0, rSELF # pass Thread::Current
+ jal artWorkAroundAppJniBugs # (Thread*, $sp)
+ move $a1, $sp # pass $sp
+ move $t9, $v0 # save target address
+ lw $a0, 12($sp)
+ lw $a1, 16($sp)
+ lw $a2, 20($sp)
+ lw $a3, 24($sp)
+ lw $ra, 28($sp)
+ jr $t9 # tail call into JNI routine
+ addiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+END art_quick_work_around_app_jni_bugs
diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S
new file mode 100644
index 0000000..e7a9b0f
--- /dev/null
+++ b/runtime/arch/mips/portable_entrypoints_mips.S
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_mips.S"
+
+ .set noreorder
+ .balign 4
+
+ .extern artPortableProxyInvokeHandler
+ENTRY art_portable_proxy_invoke_handler
+ GENERATE_GLOBAL_POINTER
+ # Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
+ # TODO: just save the registers that are needed in artPortableProxyInvokeHandler.
+ addiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sw $ra, 60($sp)
+ .cfi_rel_offset 31, 60
+ sw $s8, 56($sp)
+ .cfi_rel_offset 30, 56
+ sw $gp, 52($sp)
+ .cfi_rel_offset 28, 52
+ sw $s7, 48($sp)
+ .cfi_rel_offset 23, 48
+ sw $s6, 44($sp)
+ .cfi_rel_offset 22, 44
+ sw $s5, 40($sp)
+ .cfi_rel_offset 21, 40
+ sw $s4, 36($sp)
+ .cfi_rel_offset 20, 36
+ sw $s3, 32($sp)
+ .cfi_rel_offset 19, 32
+ sw $s2, 28($sp)
+ .cfi_rel_offset 18, 28
+ sw $a3, 12($sp)
+ .cfi_rel_offset 7, 12
+ sw $a2, 8($sp)
+ .cfi_rel_offset 6, 8
+ sw $a1, 4($sp)
+ .cfi_rel_offset 5, 4
+ # Begin argument set up.
+ sw $a0, 0($sp) # place proxy method at bottom of frame
+ move $a2, rSELF # pass Thread::Current
+ jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
+ move $a3, $sp # pass $sp
+ lw $ra, 60($sp) # restore $ra
+ jr $ra
+ addiu $sp, $sp, 64 # pop frame
+ .cfi_adjust_cfa_offset -64
+END art_portable_proxy_invoke_handler
+
+ /*
+ * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable.
+ */
+ .extern artThrowAbstractMethodErrorFromCode
+ENTRY art_portable_abstract_method_error_stub
+ GENERATE_GLOBAL_POINTER
+ la $t9, artThrowAbstractMethodErrorFromCode
+ jr $t9 # (Method*, Thread*, SP)
+ move $a1, $s1 # pass Thread::Current
+END art_portable_abstract_method_error_stub
diff --git a/runtime/oat/runtime/mips/runtime_support_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
similarity index 91%
rename from runtime/oat/runtime/mips/runtime_support_mips.S
rename to runtime/arch/mips/quick_entrypoints_mips.S
index 45d583e..d32a2b4 100644
--- a/runtime/oat/runtime/mips/runtime_support_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "asm_support.h"
+#include "asm_support_mips.S"
.set noreorder
.balign 4
@@ -24,25 +24,6 @@
/* Deliver an exception pending on a thread */
.extern artDeliverPendingExceptionFromCode
- /* Cache alignment for function entry */
-.macro ENTRY name
- .type \name, %function
- .global \name
- .balign 16
-\name:
- .cfi_startproc
-.endm
-
-.macro END name
- .cfi_endproc
- .size \name, .-\name
-.endm
-
- /* Generates $gp for function calls */
-.macro GENERATE_GLOBAL_POINTER
- .cpload $t9
-.endm
-
/*
* Macro that sets up the callee save frame to conform with
* Runtime::CreateCalleeSaveMethod(kSaveAll)
@@ -481,39 +462,6 @@
.size art_portable_invoke_stub, .-art_portable_invoke_stub
/*
- * Entry point of native methods when JNI bug compatibility is enabled.
- */
- .extern artWorkAroundAppJniBugs
-ENTRY art_quick_work_around_app_jni_bugs
- GENERATE_GLOBAL_POINTER
- # save registers that may contain arguments and LR that will be crushed by a call
- addiu $sp, $sp, -32
- .cfi_adjust_cfa_offset 32
- sw $ra, 28($sp)
- .cfi_rel_offset 31, 28
- sw $a3, 24($sp)
- .cfi_rel_offset 7, 28
- sw $a2, 20($sp)
- .cfi_rel_offset 6, 28
- sw $a1, 16($sp)
- .cfi_rel_offset 5, 28
- sw $a0, 12($sp)
- .cfi_rel_offset 4, 28
- move $a0, rSELF # pass Thread::Current
- jal artWorkAroundAppJniBugs # (Thread*, $sp)
- move $a1, $sp # pass $sp
- move $t9, $v0 # save target address
- lw $a0, 12($sp)
- lw $a1, 16($sp)
- lw $a2, 20($sp)
- lw $a3, 24($sp)
- lw $ra, 28($sp)
- jr $t9 # tail call into JNI routine
- addiu $sp, $sp, 32
- .cfi_adjust_cfa_offset -32
-END art_quick_work_around_app_jni_bugs
-
- /*
* Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
* failure.
*/
@@ -912,20 +860,6 @@
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend
- .extern artPortableProxyInvokeHandler
-ENTRY art_portable_proxy_invoke_handler
- GENERATE_GLOBAL_POINTER
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
- sw $a0, 0($sp) # place proxy method at bottom of frame
- move $a2, rSELF # pass Thread::Current
- jal artPortableProxyInvokeHandler # (Method* proxy method, receiver, Thread*, SP)
- move $a3, $sp # pass $sp
- lw $ra, 60($sp) # restore $ra
- jr $ra
- addiu $sp, $sp, 64 # pop frame
- .cfi_adjust_cfa_offset -64
-END art_portable_proxy_invoke_handler
-
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
* r0 holds the proxy method; r1, r2 and r3 may contain arguments.
@@ -1044,17 +978,6 @@
END art_quick_deoptimize
/*
- * Portable abstract method error stub. $a0 contains method* on entry. SP unused in portable.
- */
- .extern artThrowAbstractMethodErrorFromCode
-ENTRY art_portable_abstract_method_error_stub
- GENERATE_GLOBAL_POINTER
- la $t9, artThrowAbstractMethodErrorFromCode
- jr $t9 # (Method*, Thread*, SP)
- move $a1, $s1 # pass Thread::Current
-END art_portable_abstract_method_error_stub
-
- /*
* Quick abstract method error stub. $a0 contains method* on entry.
*/
ENTRY art_quick_abstract_method_error_stub
@@ -1067,42 +990,6 @@
END art_quick_abstract_method_error_stub
/*
- * Jni dlsym lookup stub.
- */
- .extern artFindNativeMethod
-ENTRY art_jni_dlsym_lookup_stub
- GENERATE_GLOBAL_POINTER
- addiu $sp, $sp, -32 # leave room for $a0, $a1, $a2, $a3, and $ra
- .cfi_adjust_cfa_offset 32
- sw $ra, 16($sp)
- .cfi_rel_offset 31, 16
- sw $a3, 12($sp)
- .cfi_rel_offset 7, 12
- sw $a2, 8($sp)
- .cfi_rel_offset 6, 8
- sw $a1, 4($sp)
- .cfi_rel_offset 5, 4
- sw $a0, 0($sp)
- .cfi_rel_offset 4, 0
- jal artFindNativeMethod # (Thread*)
- move $a0, $s1 # pass Thread::Current()
- lw $a0, 0($sp) # restore registers from stack
- lw $a1, 4($sp)
- lw $a2, 8($sp)
- lw $a3, 12($sp)
- lw $ra, 16($sp)
- beq $v0, $zero, no_native_code_found
- addiu $sp, $sp, 32 # restore the stack
- .cfi_adjust_cfa_offset -32
- move $t9, $v0 # put method code result in $t9
- jr $t9 # leaf call to method's code
- nop
-no_native_code_found:
- jr $ra
- nop
-END art_jni_dlsym_lookup_stub
-
- /*
* Long integer shift. This is different from the generic 32/64-bit
* binary operations because vAA/vBB are 64-bit but vCC (the shift
* distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
diff --git a/runtime/arch/mips/registers_mips.cc b/runtime/arch/mips/registers_mips.cc
new file mode 100644
index 0000000..5d31f2f
--- /dev/null
+++ b/runtime/arch/mips/registers_mips.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "registers_mips.h"
+
+#include <ostream>
+
+namespace art {
+namespace mips {
+
+static const char* kRegisterNames[] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra",
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+ if (rhs >= ZERO && rhs <= RA) {
+ os << kRegisterNames[rhs];
+ } else {
+ os << "Register[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const FRegister& rhs) {
+ if (rhs >= F0 && rhs < kNumberOfFRegisters) {
+ os << "f" << static_cast<int>(rhs);
+ } else {
+ os << "FRegister[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
+}
+
+} // namespace mips
+} // namespace art
diff --git a/runtime/arch/mips/registers_mips.h b/runtime/arch/mips/registers_mips.h
new file mode 100644
index 0000000..0f784ed
--- /dev/null
+++ b/runtime/arch/mips/registers_mips.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_
+#define ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_
+
+#include <iosfwd>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "globals.h"
+
+namespace art {
+namespace mips {
+
+enum Register {
+ ZERO = 0,
+ AT = 1, // Assembler temporary.
+ V0 = 2, // Values.
+ V1 = 3,
+ A0 = 4, // Arguments.
+ A1 = 5,
+ A2 = 6,
+ A3 = 7,
+ T0 = 8, // Temporaries.
+ T1 = 9,
+ T2 = 10,
+ T3 = 11,
+ T4 = 12,
+ T5 = 13,
+ T6 = 14,
+ T7 = 15,
+ S0 = 16, // Saved values.
+ S1 = 17,
+ S2 = 18,
+ S3 = 19,
+ S4 = 20,
+ S5 = 21,
+ S6 = 22,
+ S7 = 23,
+ T8 = 24, // More temporaries.
+ T9 = 25,
+ K0 = 26, // Reserved for trap handler.
+ K1 = 27,
+ GP = 28, // Global pointer.
+ SP = 29, // Stack pointer.
+ FP = 30, // Saved value/frame pointer.
+ RA = 31, // Return address.
+ kNumberOfCoreRegisters = 32,
+ kNoRegister = -1 // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs);
+
+// Values for single-precision floating point registers.
+enum FRegister {
+ F0 = 0,
+ F1 = 1,
+ F2 = 2,
+ F3 = 3,
+ F4 = 4,
+ F5 = 5,
+ F6 = 6,
+ F7 = 7,
+ F8 = 8,
+ F9 = 9,
+ F10 = 10,
+ F11 = 11,
+ F12 = 12,
+ F13 = 13,
+ F14 = 14,
+ F15 = 15,
+ F16 = 16,
+ F17 = 17,
+ F18 = 18,
+ F19 = 19,
+ F20 = 20,
+ F21 = 21,
+ F22 = 22,
+ F23 = 23,
+ F24 = 24,
+ F25 = 25,
+ F26 = 26,
+ F27 = 27,
+ F28 = 28,
+ F29 = 29,
+ F30 = 30,
+ F31 = 31,
+ kNumberOfFRegisters = 32,
+ kNoFRegister = -1,
+};
+std::ostream& operator<<(std::ostream& os, const FRegister& rhs);
+
+} // namespace mips
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_MIPS_REGISTERS_MIPS_H_
diff --git a/runtime/thread_arm.cc b/runtime/arch/mips/thread_mips.cc
similarity index 93%
rename from runtime/thread_arm.cc
rename to runtime/arch/mips/thread_mips.cc
index 0ef26bf..7364de0 100644
--- a/runtime/thread_arm.cc
+++ b/runtime/arch/mips/thread_mips.cc
@@ -16,8 +16,8 @@
#include "thread.h"
-#include "asm_support.h"
-#include "base/macros.h"
+#include "asm_support_mips.h"
+#include "base/logging.h"
namespace art {
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
new file mode 100644
index 0000000..7e6dce9
--- /dev/null
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
+#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
+
+#include "asm_support_x86.h"
+
+#if defined(__APPLE__)
+ // Mac OS' as(1) doesn't let you name macro parameters.
+ #define MACRO0(macro_name) .macro macro_name
+ #define MACRO1(macro_name, macro_arg1) .macro macro_name
+ #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
+ #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
+ #define END_MACRO .endmacro
+
+ // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names
+ // are mangled with an extra underscore prefix. The use of $x for arguments
+ // mean that literals need to be represented with $$x in macros.
+ #define SYMBOL(name) _ ## name
+ #define VAR(name,index) SYMBOL($index)
+ #define REG_VAR(name,index) %$index
+ #define CALL_MACRO(name,index) $index
+ #define LITERAL(value) $value
+ #define MACRO_LITERAL(value) $$value
+#else
+ // Regular gas(1) lets you name macro parameters.
+ #define MACRO0(macro_name) .macro macro_name
+ #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
+ #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
+ #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
+ #define END_MACRO .endm
+
+ // Regular gas(1) uses \argument_name for macro arguments.
+ // We need to turn on alternate macro syntax so we can use & instead or the preprocessor
+ // will screw us by inserting a space between the \ and the name. Even in this mode there's
+ // no special meaning to $, so literals are still just $x. The use of altmacro means % is a
+ // special character meaning care needs to be taken when passing registers as macro arguments.
+ .altmacro
+ #define SYMBOL(name) name
+ #define VAR(name,index) name&
+ #define REG_VAR(name,index) %name
+ #define CALL_MACRO(name,index) name&
+ #define LITERAL(value) $value
+ #define MACRO_LITERAL(value) $value
+#endif
+
+ /* Cache alignment for function entry */
+MACRO0(ALIGN_FUNCTION_ENTRY)
+ .balign 16
+END_MACRO
+
+MACRO1(DEFINE_FUNCTION, c_name)
+ .type VAR(c_name, 0), @function
+ .globl VAR(c_name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(c_name, 0):
+ .cfi_startproc
+END_MACRO
+
+MACRO1(END_FUNCTION, c_name)
+ .cfi_endproc
+ .size \c_name, .-\c_name
+END_MACRO
+
+MACRO1(PUSH, reg)
+ pushl REG_VAR(reg, 0)
+ .cfi_adjust_cfa_offset 4
+ .cfi_rel_offset REG_VAR(reg, 0), 0
+END_MACRO
+
+MACRO1(POP, reg)
+ popl REG_VAR(reg,0)
+ .cfi_adjust_cfa_offset -4
+ .cfi_restore REG_VAR(reg,0)
+END_MACRO
+
+#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
diff --git a/runtime/thread_arm.cc b/runtime/arch/x86/asm_support_x86.h
similarity index 60%
copy from runtime/thread_arm.cc
copy to runtime/arch/x86/asm_support_x86.h
index 0ef26bf..1092910 100644
--- a/runtime/thread_arm.cc
+++ b/runtime/arch/x86/asm_support_x86.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 The Android Open Source Project
+ * Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,16 +14,14 @@
* limitations under the License.
*/
-#include "thread.h"
+#ifndef ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
+#define ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
#include "asm_support.h"
-#include "base/macros.h"
-namespace art {
+// Offset of field Thread::self_ verified in InitCpu
+#define THREAD_SELF_OFFSET 40
+// Offset of field Thread::exception_ verified in InitCpu
+#define THREAD_EXCEPTION_OFFSET 12
-void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_));
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_));
-}
-
-} // namespace art
+#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_H_
diff --git a/runtime/oat/runtime/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
similarity index 100%
rename from runtime/oat/runtime/x86/context_x86.cc
rename to runtime/arch/x86/context_x86.cc
diff --git a/runtime/oat/runtime/x86/context_x86.h b/runtime/arch/x86/context_x86.h
similarity index 84%
rename from runtime/oat/runtime/x86/context_x86.h
rename to runtime/arch/x86/context_x86.h
index 4ecfc51..d7d2210 100644
--- a/runtime/oat/runtime/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -14,11 +14,12 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_RUNTIME_X86_CONTEXT_X86_H_
-#define ART_RUNTIME_OAT_RUNTIME_X86_CONTEXT_X86_H_
+#ifndef ART_RUNTIME_ARCH_X86_CONTEXT_X86_H_
+#define ART_RUNTIME_ARCH_X86_CONTEXT_X86_H_
-#include "constants_x86.h"
-#include "oat/runtime/context.h"
+#include "arch/context.h"
+#include "base/logging.h"
+#include "registers_x86.h"
namespace art {
namespace x86 {
@@ -43,7 +44,8 @@
}
virtual uintptr_t GetGPR(uint32_t reg) {
- CHECK_LT(reg, static_cast<uint32_t>(kNumberOfCpuRegisters));
+ const uint32_t kNumberOfCpuRegisters = 8;
+ DCHECK_LT(reg, kNumberOfCpuRegisters);
return *gprs_[reg];
}
@@ -64,4 +66,4 @@
} // namespace x86
} // namespace art
-#endif // ART_RUNTIME_OAT_RUNTIME_X86_CONTEXT_X86_H_
+#endif // ART_RUNTIME_ARCH_X86_CONTEXT_X86_H_
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
new file mode 100644
index 0000000..d47dfef
--- /dev/null
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
+#include "entrypoints/entrypoint_utils.h"
+
+namespace art {
+
+// Alloc entrypoints.
+extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
+extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
+extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
+extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
+
+// Cast entrypoints.
+extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass,
+ const mirror::Class* ref_class);
+extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
+extern "C" void art_quick_check_cast_from_code(void*, void*);
+
+// DexCache entrypoints.
+extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
+extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
+extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
+
+// Field entrypoints.
+extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
+extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
+extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
+extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
+extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
+extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
+extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
+extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
+extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
+extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
+extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
+
+// FillArray entrypoint.
+extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
+
+// Lock entrypoints.
+extern "C" void art_quick_lock_object_from_code(void*);
+extern "C" void art_quick_unlock_object_from_code(void*);
+
+// Math entrypoints.
+extern "C" double art_quick_fmod_from_code(double, double);
+extern "C" float art_quick_fmodf_from_code(float, float);
+extern "C" double art_quick_l2d_from_code(int64_t);
+extern "C" float art_quick_l2f_from_code(int64_t);
+extern "C" int64_t art_quick_d2l_from_code(double);
+extern "C" int64_t art_quick_f2l_from_code(float);
+extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t);
+extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t);
+extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t);
+extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t);
+extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t);
+extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t);
+
+// Interpreter entrypoints.
+extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
+ const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result);
+
+// Intrinsic entrypoints.
+extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t);
+extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
+extern "C" int32_t art_quick_string_compareto(void*, void*);
+extern "C" void* art_quick_memcpy(void*, const void*, size_t);
+
+// Invoke entrypoints.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** sp, Thread* thread);
+extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
+extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
+extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
+
+// Thread entrypoints.
+extern void CheckSuspendFromCode(Thread* thread);
+extern "C" void art_quick_test_suspend();
+
+// Throw entrypoints.
+extern "C" void art_quick_deliver_exception_from_code(void*);
+extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
+extern "C" void art_quick_throw_div_zero_from_code();
+extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
+extern "C" void art_quick_throw_null_pointer_exception_from_code();
+extern "C" void art_quick_throw_stack_overflow_from_code(void*);
+
+void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints) {
+ // Alloc
+ qpoints->pAllocArrayFromCode = art_quick_alloc_array_from_code;
+ qpoints->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
+ qpoints->pAllocObjectFromCode = art_quick_alloc_object_from_code;
+ qpoints->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
+ qpoints->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
+ qpoints->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
+
+ // Cast
+ qpoints->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code;
+ qpoints->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
+ qpoints->pCheckCastFromCode = art_quick_check_cast_from_code;
+
+ // DexCache
+ qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
+ qpoints->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
+ qpoints->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
+ qpoints->pResolveStringFromCode = art_quick_resolve_string_from_code;
+
+ // Field
+ qpoints->pSet32Instance = art_quick_set32_instance_from_code;
+ qpoints->pSet32Static = art_quick_set32_static_from_code;
+ qpoints->pSet64Instance = art_quick_set64_instance_from_code;
+ qpoints->pSet64Static = art_quick_set64_static_from_code;
+ qpoints->pSetObjInstance = art_quick_set_obj_instance_from_code;
+ qpoints->pSetObjStatic = art_quick_set_obj_static_from_code;
+ qpoints->pGet32Instance = art_quick_get32_instance_from_code;
+ qpoints->pGet64Instance = art_quick_get64_instance_from_code;
+ qpoints->pGetObjInstance = art_quick_get_obj_instance_from_code;
+ qpoints->pGet32Static = art_quick_get32_static_from_code;
+ qpoints->pGet64Static = art_quick_get64_static_from_code;
+ qpoints->pGetObjStatic = art_quick_get_obj_static_from_code;
+
+ // FillArray
+ qpoints->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
+
+ // JNI
+ qpoints->pJniMethodStart = JniMethodStart;
+ qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized;
+ qpoints->pJniMethodEnd = JniMethodEnd;
+ qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized;
+ qpoints->pJniMethodEndWithReference = JniMethodEndWithReference;
+ qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
+
+ // Locks
+ qpoints->pLockObjectFromCode = art_quick_lock_object_from_code;
+ qpoints->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
+
+ // Math
+ // points->pCmpgDouble = NULL; // Not needed on x86.
+ // points->pCmpgFloat = NULL; // Not needed on x86.
+ // points->pCmplDouble = NULL; // Not needed on x86.
+ // points->pCmplFloat = NULL; // Not needed on x86.
+ qpoints->pFmod = art_quick_fmod_from_code;
+ qpoints->pL2d = art_quick_l2d_from_code;
+ qpoints->pFmodf = art_quick_fmodf_from_code;
+ qpoints->pL2f = art_quick_l2f_from_code;
+ // points->pD2iz = NULL; // Not needed on x86.
+ // points->pF2iz = NULL; // Not needed on x86.
+ qpoints->pIdivmod = art_quick_idivmod_from_code;
+ qpoints->pD2l = art_quick_d2l_from_code;
+ qpoints->pF2l = art_quick_f2l_from_code;
+ qpoints->pLdiv = art_quick_ldiv_from_code;
+ qpoints->pLdivmod = art_quick_ldivmod_from_code;
+ qpoints->pLmul = art_quick_lmul_from_code;
+ qpoints->pShlLong = art_quick_lshl_from_code;
+ qpoints->pShrLong = art_quick_lshr_from_code;
+ qpoints->pUshrLong = art_quick_lushr_from_code;
+
+ // Interpreter
+ qpoints->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
+ qpoints->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
+
+ // Intrinsics
+ qpoints->pIndexOf = art_quick_indexof;
+ qpoints->pMemcmp16 = art_quick_memcmp16;
+ qpoints->pStringCompareTo = art_quick_string_compareto;
+ qpoints->pMemcpy = art_quick_memcpy;
+
+ // Invocation
+ qpoints->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
+ qpoints->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
+ qpoints->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
+ qpoints->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
+ qpoints->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
+ qpoints->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
+ qpoints->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
+
+ // Thread
+ qpoints->pCheckSuspendFromCode = CheckSuspendFromCode;
+ qpoints->pTestSuspendFromCode = art_quick_test_suspend;
+
+ // Throws
+ qpoints->pDeliverException = art_quick_deliver_exception_from_code;
+ qpoints->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
+ qpoints->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
+ qpoints->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
+ qpoints->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
+ qpoints->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
+
+ // Portable
+ ppoints->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
+}
+
+} // namespace art
diff --git a/runtime/arch/x86/jni_entrypoints_x86.S b/runtime/arch/x86/jni_entrypoints_x86.S
new file mode 100644
index 0000000..e9c88fe
--- /dev/null
+++ b/runtime/arch/x86/jni_entrypoints_x86.S
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86.S"
+
+ /*
+ * Portable resolution trampoline.
+ */
+DEFINE_FUNCTION art_jni_dlsym_lookup_stub
+ subl LITERAL(8), %esp // align stack
+ .cfi_adjust_cfa_offset 8
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ call SYMBOL(artFindNativeMethod) // (Thread*)
+ addl LITERAL(12), %esp // restore the stack
+ .cfi_adjust_cfa_offset -12
+ cmpl LITERAL(0), %eax // check if returned method code is null
+ je no_native_code_found // if null, jump to return to handle
+ jmp *%eax // otherwise, tail call to intended method
+no_native_code_found:
+ ret
+END_FUNCTION art_jni_dlsym_lookup_stub
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
new file mode 100644
index 0000000..a0fca6c
--- /dev/null
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "asm_support_x86.S"
+
+ /*
+ * Portable invocation stub.
+ * On entry:
+ * [sp] = return address
+ * [sp + 4] = method pointer
+ * [sp + 8] = argument array or NULL for no argument methods
+ * [sp + 12] = size of argument array in bytes
+ * [sp + 16] = (managed) thread pointer
+ * [sp + 20] = JValue* result
+ * [sp + 24] = result type char
+ */
+DEFINE_FUNCTION art_portable_invoke_stub
+ PUSH ebp // save ebp
+ PUSH ebx // save ebx
+ mov %esp, %ebp // copy value of stack pointer into base pointer
+ .cfi_def_cfa_register ebp
+ mov 20(%ebp), %ebx // get arg array size
+ addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
+ andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
+ subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
+ subl %ebx, %esp // reserve stack space for argument array
+ lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy
+ pushl 20(%ebp) // push size of region to memcpy
+ pushl 16(%ebp) // push arg array as source of memcpy
+ pushl %eax // push stack pointer as destination of memcpy
+ call SYMBOL(memcpy) // (void*, const void*, size_t)
+ addl LITERAL(12), %esp // pop arguments to memcpy
+ mov 12(%ebp), %eax // move method pointer into eax
+ mov %eax, (%esp) // push method pointer onto stack
+ call *METHOD_CODE_OFFSET(%eax) // call the method
+ mov %ebp, %esp // restore stack pointer
+ POP ebx // pop ebx
+ POP ebp // pop ebp
+ mov 20(%esp), %ecx // get result pointer
+ cmpl LITERAL(68), 24(%esp) // test if result type char == 'D'
+ je return_double_portable
+ cmpl LITERAL(70), 24(%esp) // test if result type char == 'F'
+ je return_float_portable
+ mov %eax, (%ecx) // store the result
+ mov %edx, 4(%ecx) // store the other half of the result
+ ret
+return_double_portable:
+ fstpl (%ecx) // store the floating point result as double
+ ret
+return_float_portable:
+ fstps (%ecx) // store the floating point result as float
+ ret
+END_FUNCTION art_portable_invoke_stub
+
+DEFINE_FUNCTION art_portable_proxy_invoke_handler
+ // Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
+ // TODO: just save the registers that are needed in artPortableProxyInvokeHandler.
+ PUSH edi // Save callee saves
+ PUSH esi
+ PUSH ebp
+ PUSH ebx // Save args
+ PUSH edx
+ PUSH ecx
+ PUSH eax // Align stack, eax will be clobbered by Method*
+ // Begin argument set up.
+ PUSH esp // pass SP
+ pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
+ .cfi_adjust_cfa_offset 4
+ PUSH ecx // pass receiver
+ PUSH eax // pass proxy method
+ call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ movd %eax, %xmm0 // place return value also into floating point return value
+ movd %edx, %xmm1
+ punpckldq %xmm1, %xmm0
+ addl LITERAL(44), %esp // pop arguments
+ .cfi_adjust_cfa_offset -44
+ ret
+END_FUNCTION art_portable_proxy_invoke_handler
+
+ /*
+ * Portable abstract method error stub. method* is at %esp + 4 on entry.
+ */
+DEFINE_FUNCTION art_portable_abstract_method_error_stub
+ PUSH ebp
+ movl %esp, %ebp // Remember SP.
+ .cfi_def_cfa_register ebp
+ subl LITERAL(12), %esp // Align stack.
+ PUSH esp // Pass sp (not used).
+ pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
+ pushl 8(%ebp) // Pass Method*.
+ call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP)
+ leave // Restore the stack and %ebp.
+ .cfi_def_cfa esp, 4
+ .cfi_restore ebp
+ ret // Return to caller to handle pending exception.
+END_FUNCTION art_portable_abstract_method_error_stub
diff --git a/runtime/oat/runtime/x86/runtime_support_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
similarity index 85%
rename from runtime/oat/runtime/x86/runtime_support_x86.S
rename to runtime/arch/x86/quick_entrypoints_x86.S
index ee6db0c..89ea71a 100644
--- a/runtime/oat/runtime/x86/runtime_support_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -14,76 +14,7 @@
* limitations under the License.
*/
-#include "asm_support.h"
-
-#if defined(__APPLE__)
- // Mac OS' as(1) doesn't let you name macro parameters.
- #define MACRO0(macro_name) .macro macro_name
- #define MACRO1(macro_name, macro_arg1) .macro macro_name
- #define MACRO2(macro_name, macro_arg1, macro_args2) .macro macro_name
- #define MACRO3(macro_name, macro_arg1, macro_args2, macro_args3) .macro macro_name
- #define END_MACRO .endmacro
-
- // Mac OS' as(1) uses $0, $1, and so on for macro arguments, and function names
- // are mangled with an extra underscore prefix. The use of $x for arguments
- // mean that literals need to be represented with $$x in macros.
- #define SYMBOL(name) _ ## name
- #define VAR(name,index) SYMBOL($index)
- #define REG_VAR(name,index) %$index
- #define CALL_MACRO(name,index) $index
- #define LITERAL(value) $value
- #define MACRO_LITERAL(value) $$value
-#else
- // Regular gas(1) lets you name macro parameters.
- #define MACRO0(macro_name) .macro macro_name
- #define MACRO1(macro_name, macro_arg1) .macro macro_name macro_arg1
- #define MACRO2(macro_name, macro_arg1, macro_arg2) .macro macro_name macro_arg1, macro_arg2
- #define MACRO3(macro_name, macro_arg1, macro_arg2, macro_arg3) .macro macro_name macro_arg1, macro_arg2, macro_arg3
- #define END_MACRO .endm
-
- // Regular gas(1) uses \argument_name for macro arguments.
- // We need to turn on alternate macro syntax so we can use & instead or the preprocessor
- // will screw us by inserting a space between the \ and the name. Even in this mode there's
- // no special meaning to $, so literals are still just $x. The use of altmacro means % is a
- // special character meaning care needs to be taken when passing registers as macro arguments.
- .altmacro
- #define SYMBOL(name) name
- #define VAR(name,index) name&
- #define REG_VAR(name,index) %name
- #define CALL_MACRO(name,index) name&
- #define LITERAL(value) $value
- #define MACRO_LITERAL(value) $value
-#endif
-
- /* Cache alignment for function entry */
-MACRO0(ALIGN_FUNCTION_ENTRY)
- .balign 16
-END_MACRO
-
-MACRO1(DEFINE_FUNCTION, c_name)
- .type VAR(c_name, 0), @function
- .globl VAR(c_name, 0)
- ALIGN_FUNCTION_ENTRY
-VAR(c_name, 0):
- .cfi_startproc
-END_MACRO
-
-MACRO1(END_FUNCTION, c_name)
- .cfi_endproc
- .size \c_name, .-\c_name
-END_MACRO
-
-MACRO1(PUSH, reg)
- pushl REG_VAR(reg, 0)
- .cfi_adjust_cfa_offset 4
- .cfi_rel_offset REG_VAR(reg, 0), 0
-END_MACRO
-
-MACRO1(POP, reg)
- popl REG_VAR(reg,0)
- .cfi_adjust_cfa_offset -4
- .cfi_restore REG_VAR(reg,0)
-END_MACRO
+#include "asm_support_x86.S"
/*
* Macro that sets up the callee save frame to conform with
@@ -302,55 +233,6 @@
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
/*
- * Portable invocation stub.
- * On entry:
- * [sp] = return address
- * [sp + 4] = method pointer
- * [sp + 8] = argument array or NULL for no argument methods
- * [sp + 12] = size of argument array in bytes
- * [sp + 16] = (managed) thread pointer
- * [sp + 20] = JValue* result
- * [sp + 24] = result type char
- */
-DEFINE_FUNCTION art_portable_invoke_stub
- PUSH ebp // save ebp
- PUSH ebx // save ebx
- mov %esp, %ebp // copy value of stack pointer into base pointer
- .cfi_def_cfa_register ebp
- mov 20(%ebp), %ebx // get arg array size
- addl LITERAL(28), %ebx // reserve space for return addr, method*, ebx, and ebp in frame
- andl LITERAL(0xFFFFFFF0), %ebx // align frame size to 16 bytes
- subl LITERAL(12), %ebx // remove space for return address, ebx, and ebp
- subl %ebx, %esp // reserve stack space for argument array
- lea 4(%esp), %eax // use stack pointer + method ptr as dest for memcpy
- pushl 20(%ebp) // push size of region to memcpy
- pushl 16(%ebp) // push arg array as source of memcpy
- pushl %eax // push stack pointer as destination of memcpy
- call SYMBOL(memcpy) // (void*, const void*, size_t)
- addl LITERAL(12), %esp // pop arguments to memcpy
- mov 12(%ebp), %eax // move method pointer into eax
- mov %eax, (%esp) // push method pointer onto stack
- call *METHOD_CODE_OFFSET(%eax) // call the method
- mov %ebp, %esp // restore stack pointer
- POP ebx // pop ebx
- POP ebp // pop ebp
- mov 20(%esp), %ecx // get result pointer
- cmpl LITERAL(68), 24(%esp) // test if result type char == 'D'
- je return_double_portable
- cmpl LITERAL(70), 24(%esp) // test if result type char == 'F'
- je return_float_portable
- mov %eax, (%ecx) // store the result
- mov %edx, 4(%ecx) // store the other half of the result
- ret
-return_double_portable:
- fstpl (%ecx) // store the floating point result as double
- ret
-return_float_portable:
- fstps (%ecx) // store the floating point result as float
- ret
-END_FUNCTION art_portable_invoke_stub
-
- /*
* Quick invocation stub.
* On entry:
* [sp] = return address
@@ -920,22 +802,6 @@
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
END_FUNCTION art_quick_get_obj_static_from_code
-DEFINE_FUNCTION art_portable_proxy_invoke_handler
- SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method*
- PUSH esp // pass SP
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
- PUSH ecx // pass receiver
- PUSH eax // pass proxy method
- call SYMBOL(artPortableProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
- movd %eax, %xmm0 // place return value also into floating point return value
- movd %edx, %xmm1
- punpckldq %xmm1, %xmm0
- addl LITERAL(44), %esp // pop arguments
- .cfi_adjust_cfa_offset -44
- ret
-END_FUNCTION art_portable_proxy_invoke_handler
-
DEFINE_FUNCTION art_quick_proxy_invoke_handler
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame and Method*
PUSH esp // pass SP
@@ -1054,24 +920,6 @@
END_FUNCTION art_quick_deoptimize
/*
- * Portable abstract method error stub. method* is at %esp + 4 on entry.
- */
-DEFINE_FUNCTION art_portable_abstract_method_error_stub
- PUSH ebp
- movl %esp, %ebp // Remember SP.
- .cfi_def_cfa_register ebp
- subl LITERAL(12), %esp // Align stack.
- PUSH esp // Pass sp (not used).
- pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
- pushl 8(%ebp) // Pass Method*.
- call SYMBOL(artThrowAbstractMethodErrorFromCode) // (Method*, Thread*, SP)
- leave // Restore the stack and %ebp.
- .cfi_def_cfa esp, 4
- .cfi_restore ebp
- ret // Return to caller to handle pending exception.
-END_FUNCTION art_portable_abstract_method_error_stub
-
- /*
* Quick abstract method error stub. %eax contains method* on entry.
*/
DEFINE_FUNCTION art_quick_abstract_method_error_stub
@@ -1087,24 +935,6 @@
END_FUNCTION art_quick_abstract_method_error_stub
/*
- * Portable resolution trampoline.
- */
-DEFINE_FUNCTION art_jni_dlsym_lookup_stub
- subl LITERAL(8), %esp // align stack
- .cfi_adjust_cfa_offset 8
- pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
- .cfi_adjust_cfa_offset 4
- call SYMBOL(artFindNativeMethod) // (Thread*)
- addl LITERAL(12), %esp // restore the stack
- .cfi_adjust_cfa_offset -12
- cmpl LITERAL(0), %eax // check if returned method code is null
- je no_native_code_found // if null, jump to return to handle
- jmp *%eax // otherwise, tail call to intended method
-no_native_code_found:
- ret
-END_FUNCTION art_jni_dlsym_lookup_stub
-
- /*
* String's indexOf.
*
* On entry:
diff --git a/runtime/thread_arm.cc b/runtime/arch/x86/registers_x86.cc
similarity index 62%
copy from runtime/thread_arm.cc
copy to runtime/arch/x86/registers_x86.cc
index 0ef26bf..4255d64 100644
--- a/runtime/thread_arm.cc
+++ b/runtime/arch/x86/registers_x86.cc
@@ -14,16 +14,24 @@
* limitations under the License.
*/
-#include "thread.h"
+#include "registers_x86.h"
-#include "asm_support.h"
-#include "base/macros.h"
+#include <ostream>
namespace art {
+namespace x86 {
-void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_));
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_));
+static const char* kRegisterNames[] = {
+ "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs) {
+ if (rhs >= EAX && rhs <= EDI) {
+ os << kRegisterNames[rhs];
+ } else {
+ os << "Register[" << static_cast<int>(rhs) << "]";
+ }
+ return os;
}
+} // namespace x86
} // namespace art
diff --git a/runtime/arch/x86/registers_x86.h b/runtime/arch/x86/registers_x86.h
new file mode 100644
index 0000000..23027ed
--- /dev/null
+++ b/runtime/arch/x86/registers_x86.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ARCH_X86_REGISTERS_X86_H_
+#define ART_RUNTIME_ARCH_X86_REGISTERS_X86_H_
+
+#include <iosfwd>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "globals.h"
+
+namespace art {
+namespace x86 {
+
+enum Register {
+ EAX = 0,
+ ECX = 1,
+ EDX = 2,
+ EBX = 3,
+ ESP = 4,
+ EBP = 5,
+ ESI = 6,
+ EDI = 7,
+ kNumberOfCpuRegisters = 8,
+ kFirstByteUnsafeRegister = 4,
+ kNoRegister = -1 // Signals an illegal register.
+};
+std::ostream& operator<<(std::ostream& os, const Register& rhs);
+
+} // namespace x86
+} // namespace art
+
+#endif // ART_RUNTIME_ARCH_X86_REGISTERS_X86_H_
diff --git a/runtime/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
similarity index 98%
rename from runtime/thread_x86.cc
rename to runtime/arch/x86/thread_x86.cc
index c398b28..dd3e7dd 100644
--- a/runtime/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -19,7 +19,7 @@
#include <sys/syscall.h>
#include <sys/types.h>
-#include "asm_support.h"
+#include "asm_support_x86.h"
#include "base/macros.h"
#include "thread.h"
#include "thread_list.h"
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 7b20c7a..aca93a5 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -30,29 +30,4 @@
// Offset of field Method::entry_point_from_compiled_code_
#define METHOD_CODE_OFFSET 40
-#if defined(__arm__)
-// Register holding suspend check count down.
-#define rSUSPEND r4
-// Register holding Thread::Current().
-#define rSELF r9
-// Offset of field Thread::suspend_count_ verified in InitCpu
-#define THREAD_FLAGS_OFFSET 0
-// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 12
-#elif defined(__mips__)
-// Register holding suspend check count down.
-#define rSUSPEND $s0
-// Register holding Thread::Current().
-#define rSELF $s1
-// Offset of field Thread::suspend_count_ verified in InitCpu
-#define THREAD_FLAGS_OFFSET 0
-// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 12
-#elif defined(__i386__)
-// Offset of field Thread::self_ verified in InitCpu
-#define THREAD_SELF_OFFSET 40
-// Offset of field Thread::exception_ verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 12
-#endif
-
#endif // ART_RUNTIME_ASM_SUPPORT_H_
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index dea52a6..b924798 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -53,7 +53,7 @@
class ScopedContentionRecorder;
class Thread;
-const bool kDebugLocking = kIsDebugBuild;
+const bool kDebugLocking = true || kIsDebugBuild;
// Base class for all Mutex implementations
class BaseMutex {
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index bf6fd17..dfb0220 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -14,6 +14,11 @@
* limitations under the License.
*/
+
+#define ATRACE_TAG ATRACE_TAG_DALVIK
+#include <stdio.h>
+#include <cutils/trace.h>
+
#include "timing_logger.h"
#include "base/logging.h"
@@ -26,49 +31,6 @@
namespace art {
-void TimingLogger::Reset() {
- times_.clear();
- labels_.clear();
- AddSplit("");
-}
-
-TimingLogger::TimingLogger(const std::string &name, bool precise)
- : name_(name),
- precise_(precise) {
- AddSplit("");
-}
-
-void TimingLogger::AddSplit(const std::string &label) {
- times_.push_back(NanoTime());
- labels_.push_back(label);
-}
-
-uint64_t TimingLogger::GetTotalNs() const {
- return times_.back() - times_.front();
-}
-
-void TimingLogger::Dump(std::ostream &os) const {
- uint64_t largest_time = 0;
- os << name_ << ": begin\n";
- for (size_t i = 1; i < times_.size(); ++i) {
- uint64_t delta_time = times_[i] - times_[i - 1];
- largest_time = std::max(largest_time, delta_time);
- }
- // Compute which type of unit we will use for printing the timings.
- TimeUnit tu = GetAppropriateTimeUnit(largest_time);
- uint64_t divisor = GetNsToTimeUnitDivisor(tu);
- for (size_t i = 1; i < times_.size(); ++i) {
- uint64_t delta_time = times_[i] - times_[i - 1];
- if (!precise_ && divisor >= 1000) {
- // Make the fraction 0.
- delta_time -= delta_time % (divisor / 1000);
- }
- os << name_ << ": " << std::setw(8) << FormatDuration(delta_time, tu) << " "
- << labels_[i] << "\n";
- }
- os << name_ << ": end, " << NsToMs(GetTotalNs()) << " ms\n";
-}
-
CumulativeLogger::CumulativeLogger(const std::string& name)
: name_(name),
lock_name_("CumulativeLoggerLock" + name),
@@ -112,17 +74,8 @@
return total;
}
-void CumulativeLogger::AddLogger(const TimingLogger &logger) {
- MutexLock mu(Thread::Current(), lock_);
- DCHECK_EQ(logger.times_.size(), logger.labels_.size());
- for (size_t i = 1; i < logger.times_.size(); ++i) {
- const uint64_t delta_time = logger.times_[i] - logger.times_[i - 1];
- const std::string &label = logger.labels_[i];
- AddPair(label, delta_time);
- }
-}
-void CumulativeLogger::AddNewLogger(const base::NewTimingLogger &logger) {
+void CumulativeLogger::AddLogger(const base::TimingLogger &logger) {
MutexLock mu(Thread::Current(), lock_);
const std::vector<std::pair<uint64_t, const char*> >& splits = logger.GetSplits();
typedef std::vector<std::pair<uint64_t, const char*> >::const_iterator It;
@@ -183,51 +136,55 @@
namespace base {
-NewTimingLogger::NewTimingLogger(const char* name, bool precise, bool verbose)
+TimingLogger::TimingLogger(const char* name, bool precise, bool verbose)
: name_(name), precise_(precise), verbose_(verbose),
current_split_(NULL), current_split_start_ns_(0) {
}
-void NewTimingLogger::Reset() {
+void TimingLogger::Reset() {
current_split_ = NULL;
current_split_start_ns_ = 0;
splits_.clear();
}
-void NewTimingLogger::StartSplit(const char* new_split_label) {
+void TimingLogger::StartSplit(const char* new_split_label) {
DCHECK(current_split_ == NULL);
if (verbose_) {
LOG(INFO) << "Begin: " << new_split_label;
}
current_split_ = new_split_label;
+ ATRACE_BEGIN(current_split_);
current_split_start_ns_ = NanoTime();
}
// Ends the current split and starts the one given by the label.
-void NewTimingLogger::NewSplit(const char* new_split_label) {
+void TimingLogger::NewSplit(const char* new_split_label) {
DCHECK(current_split_ != NULL);
uint64_t current_time = NanoTime();
uint64_t split_time = current_time - current_split_start_ns_;
+ ATRACE_END();
splits_.push_back(std::pair<uint64_t, const char*>(split_time, current_split_));
if (verbose_) {
LOG(INFO) << "End: " << current_split_ << " " << PrettyDuration(split_time) << "\n"
<< "Begin: " << new_split_label;
}
current_split_ = new_split_label;
+ ATRACE_BEGIN(current_split_);
current_split_start_ns_ = current_time;
}
-void NewTimingLogger::EndSplit() {
+void TimingLogger::EndSplit() {
DCHECK(current_split_ != NULL);
uint64_t current_time = NanoTime();
uint64_t split_time = current_time - current_split_start_ns_;
+ ATRACE_END();
if (verbose_) {
LOG(INFO) << "End: " << current_split_ << " " << PrettyDuration(split_time);
}
splits_.push_back(std::pair<uint64_t, const char*>(split_time, current_split_));
}
-uint64_t NewTimingLogger::GetTotalNs() const {
+uint64_t TimingLogger::GetTotalNs() const {
uint64_t total_ns = 0;
typedef std::vector<std::pair<uint64_t, const char*> >::const_iterator It;
for (It it = splits_.begin(), end = splits_.end(); it != end; ++it) {
@@ -237,7 +194,7 @@
return total_ns;
}
-void NewTimingLogger::Dump(std::ostream &os) const {
+void TimingLogger::Dump(std::ostream &os) const {
uint64_t longest_split = 0;
uint64_t total_ns = 0;
typedef std::vector<std::pair<uint64_t, const char*> >::const_iterator It;
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index 0f00a04..0998837 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -26,27 +26,8 @@
namespace art {
-class CumulativeLogger;
-
-class TimingLogger {
- public:
- explicit TimingLogger(const std::string& name, bool precise);
- void AddSplit(const std::string& label);
- void Dump(std::ostream& os) const;
- void Reset();
- uint64_t GetTotalNs() const;
-
- protected:
- const std::string name_;
- const bool precise_;
- std::vector<uint64_t> times_;
- std::vector<std::string> labels_;
-
- friend class CumulativeLogger;
-};
-
namespace base {
- class NewTimingLogger;
+ class TimingLogger;
} // namespace base
class CumulativeLogger {
@@ -62,8 +43,7 @@
// Allow the name to be modified, particularly when the cumulative logger is a field within a
// parent class that is unable to determine the "name" of a sub-class.
void SetName(const std::string& name);
- void AddLogger(const TimingLogger& logger) LOCKS_EXCLUDED(lock_);
- void AddNewLogger(const base::NewTimingLogger& logger) LOCKS_EXCLUDED(lock_);
+ void AddLogger(const base::TimingLogger& logger) LOCKS_EXCLUDED(lock_);
private:
void AddPair(const std::string &label, uint64_t delta_time)
@@ -84,16 +64,15 @@
namespace base {
// A replacement to timing logger that know when a split starts for the purposes of logging.
-// TODO: replace uses of TimingLogger with base::NewTimingLogger.
-class NewTimingLogger {
+class TimingLogger {
public:
- explicit NewTimingLogger(const char* name, bool precise, bool verbose);
+ explicit TimingLogger(const char* name, bool precise, bool verbose);
// Clears current splits and labels.
void Reset();
// Starts a split, a split shouldn't be in progress.
- void StartSplit(const char* new_split_label);
+ void StartSplit(const char* new_split_label);
// Ends the current split and starts the one given by the label.
void NewSplit(const char* new_split_label);
@@ -111,7 +90,7 @@
protected:
// The name of the timing logger.
- const std::string name_;
+ const char* name_;
// Do we want to print the exactly recorded split (true) or round down to the time unit being
// used (false).
@@ -130,7 +109,7 @@
std::vector<std::pair<uint64_t, const char*> > splits_;
private:
- DISALLOW_COPY_AND_ASSIGN(NewTimingLogger);
+ DISALLOW_COPY_AND_ASSIGN(TimingLogger);
};
} // namespace base
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 40033b7..84f186d 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -58,10 +58,7 @@
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
-#include "runtime_support.h"
-#if defined(ART_USE_PORTABLE_COMPILER)
-#include "runtime_support_llvm.h"
-#endif
+#include "entrypoints/entrypoint_utils.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
@@ -1022,15 +1019,19 @@
return;
}
- // Set entry points to interpreter for methods in interpreter only mode.
if (obj->IsMethod()) {
mirror::AbstractMethod* method = obj->AsMethod();
+ // Set entry points to interpreter for methods in interpreter only mode.
if (Runtime::Current()->GetInstrumentation()->InterpretOnly() && !method->IsNative()) {
method->SetEntryPointFromInterpreter(interpreter::artInterpreterToInterpreterEntry);
if (method != Runtime::Current()->GetResolutionMethod()) {
method->SetEntryPointFromCompiledCode(GetInterpreterEntryPoint());
}
}
+ // Populate native method pointer with jni lookup stub.
+ if (method->IsNative()) {
+ method->UnregisterNative(Thread::Current());
+ }
}
}
@@ -1523,6 +1524,13 @@
// Special case to get oat code without overwriting a trampoline.
const void* ClassLinker::GetOatCodeFor(const mirror::AbstractMethod* method) {
CHECK(!method->IsAbstract()) << PrettyMethod(method);
+ if (method->IsProxyMethod()) {
+#if !defined(ART_USE_PORTABLE_COMPILER)
+ return reinterpret_cast<void*>(art_quick_proxy_invoke_handler);
+#else
+ return reinterpret_cast<void*>(art_portable_proxy_invoke_handler);
+#endif
+ }
const void* result = GetOatMethodFor(method).GetCode();
if (result == NULL) {
// No code? You must mean to go into the interpreter.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 75886cf..4659fd1 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -22,6 +22,7 @@
#include "class_linker-inl.h"
#include "common_test.h"
#include "dex_file.h"
+#include "entrypoints/entrypoint_utils.h"
#include "gc/heap.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
@@ -32,7 +33,6 @@
#include "mirror/object_array-inl.h"
#include "mirror/proxy.h"
#include "mirror/stack_trace_element.h"
-#include "runtime_support.h"
#include "sirt_ref.h"
using ::art::mirror::AbstractMethod;
diff --git a/runtime/common_test.h b/runtime/common_test.h
index 842f959..7ee6fe2 100644
--- a/runtime/common_test.h
+++ b/runtime/common_test.h
@@ -31,6 +31,7 @@
#include "class_linker.h"
#include "compiler/driver/compiler_driver.h"
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "gc/heap.h"
#include "gtest/gtest.h"
#include "instruction_set.h"
@@ -39,7 +40,6 @@
#include "object_utils.h"
#include "os.h"
#include "runtime.h"
-#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "thread.h"
@@ -473,7 +473,8 @@
void CompileMethod(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(method != NULL);
- TimingLogger timings("CommonTest::CompileMethod", false);
+ base::TimingLogger timings("CommonTest::CompileMethod", false, false);
+ timings.StartSplit("CompileOne");
compiler_driver_->CompileOne(method, timings);
MakeExecutable(method);
}
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 7ebd6a3..3591a50 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -20,6 +20,7 @@
#include <set>
+#include "arch/context.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -37,7 +38,6 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/throwable.h"
-#include "oat/runtime/context.h"
#include "object_utils.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 6be249c..13b0f1c 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -281,9 +281,7 @@
// Returns the opcode field of the instruction.
Code Opcode() const {
- const uint16_t* insns = reinterpret_cast<const uint16_t*>(this);
- int opcode = *insns & 0xFF;
- return static_cast<Code>(opcode);
+ return static_cast<Code>(Fetch16(0) & 0xFF);
}
void SetOpcode(Code opcode) {
diff --git a/runtime/runtime_support.cc b/runtime/entrypoints/entrypoint_utils.cc
similarity index 89%
rename from runtime/runtime_support.cc
rename to runtime/entrypoints/entrypoint_utils.cc
index d28aad1..c297841 100644
--- a/runtime/runtime_support.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "runtime_support.h"
+#include "entrypoints/entrypoint_utils.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
@@ -30,74 +30,6 @@
#include "ScopedLocalRef.h"
#include "well_known_classes.h"
-double art_l2d(int64_t l) {
- return static_cast<double>(l);
-}
-
-float art_l2f(int64_t l) {
- return static_cast<float>(l);
-}
-
-/*
- * Float/double conversion requires clamping to min and max of integer form. If
- * target doesn't support this normally, use these.
- */
-int64_t art_d2l(double d) {
- static const double kMaxLong = static_cast<double>(static_cast<int64_t>(0x7fffffffffffffffULL));
- static const double kMinLong = static_cast<double>(static_cast<int64_t>(0x8000000000000000ULL));
- if (d >= kMaxLong) {
- return static_cast<int64_t>(0x7fffffffffffffffULL);
- } else if (d <= kMinLong) {
- return static_cast<int64_t>(0x8000000000000000ULL);
- } else if (d != d) { // NaN case
- return 0;
- } else {
- return static_cast<int64_t>(d);
- }
-}
-
-int64_t art_f2l(float f) {
- static const float kMaxLong = static_cast<float>(static_cast<int64_t>(0x7fffffffffffffffULL));
- static const float kMinLong = static_cast<float>(static_cast<int64_t>(0x8000000000000000ULL));
- if (f >= kMaxLong) {
- return static_cast<int64_t>(0x7fffffffffffffffULL);
- } else if (f <= kMinLong) {
- return static_cast<int64_t>(0x8000000000000000ULL);
- } else if (f != f) { // NaN case
- return 0;
- } else {
- return static_cast<int64_t>(f);
- }
-}
-
-int32_t art_d2i(double d) {
- static const double kMaxInt = static_cast<double>(static_cast<int32_t>(0x7fffffffUL));
- static const double kMinInt = static_cast<double>(static_cast<int32_t>(0x80000000UL));
- if (d >= kMaxInt) {
- return static_cast<int32_t>(0x7fffffffUL);
- } else if (d <= kMinInt) {
- return static_cast<int32_t>(0x80000000UL);
- } else if (d != d) { // NaN case
- return 0;
- } else {
- return static_cast<int32_t>(d);
- }
-}
-
-int32_t art_f2i(float f) {
- static const float kMaxInt = static_cast<float>(static_cast<int32_t>(0x7fffffffUL));
- static const float kMinInt = static_cast<float>(static_cast<int32_t>(0x80000000UL));
- if (f >= kMaxInt) {
- return static_cast<int32_t>(0x7fffffffUL);
- } else if (f <= kMinInt) {
- return static_cast<int32_t>(0x80000000UL);
- } else if (f != f) { // NaN case
- return 0;
- } else {
- return static_cast<int32_t>(f);
- }
-}
-
namespace art {
// Helper function to allocate array for FILLED_NEW_ARRAY.
diff --git a/runtime/runtime_support.h b/runtime/entrypoints/entrypoint_utils.h
similarity index 97%
rename from runtime/runtime_support.h
rename to runtime/entrypoints/entrypoint_utils.h
index 43c6784..3f28b5e 100644
--- a/runtime/runtime_support.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_RUNTIME_SUPPORT_H_
-#define ART_RUNTIME_RUNTIME_SUPPORT_H_
+#ifndef ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
+#define ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
#include "class_linker.h"
#include "common_throws.h"
@@ -42,13 +42,6 @@
extern "C" void art_quick_proxy_invoke_handler();
extern "C" void art_work_around_app_jni_bugs();
-extern "C" double art_l2d(int64_t l);
-extern "C" float art_l2f(int64_t l);
-extern "C" int64_t art_d2l(double d);
-extern "C" int32_t art_d2i(double d);
-extern "C" int64_t art_f2l(float f);
-extern "C" int32_t art_f2i(float f);
-
namespace art {
namespace mirror {
class Class;
@@ -416,4 +409,4 @@
} // namespace art
-#endif // ART_RUNTIME_RUNTIME_SUPPORT_H_
+#endif // ART_RUNTIME_ENTRYPOINTS_ENTRYPOINT_UTILS_H_
diff --git a/runtime/entrypoints/jni/jni_entrypoints.cc b/runtime/entrypoints/jni/jni_entrypoints.cc
new file mode 100644
index 0000000..98f7b12
--- /dev/null
+++ b/runtime/entrypoints/jni/jni_entrypoints.cc
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/logging.h"
+#include "mirror/abstract_method.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+// Used by the JNI dlsym stub to find the native method to invoke if none is registered.
+extern "C" void* artFindNativeMethod(Thread* self) {
+ Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
+ DCHECK(Thread::Current() == self);
+ ScopedObjectAccess soa(self);
+
+ mirror::AbstractMethod* method = self->GetCurrentMethod(NULL);
+ DCHECK(method != NULL);
+
+ // Lookup symbol address for method, on failure we'll return NULL with an
+ // exception set, otherwise we return the address of the method we found.
+ void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
+ if (native_code == NULL) {
+ DCHECK(self->IsExceptionPending());
+ return NULL;
+ } else {
+ // Register so that future calls don't come here
+ method->RegisterNative(self, native_code);
+ return native_code;
+ }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/math_entrypoints.cc b/runtime/entrypoints/math_entrypoints.cc
new file mode 100644
index 0000000..31d13c8
--- /dev/null
+++ b/runtime/entrypoints/math_entrypoints.cc
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "math_entrypoints.h"
+
+namespace art {
+
+extern "C" double art_l2d(int64_t l) {
+ return static_cast<double>(l);
+}
+
+extern "C" float art_l2f(int64_t l) {
+ return static_cast<float>(l);
+}
+
+/*
+ * Float/double conversion requires clamping to min and max of integer form. If
+ * target doesn't support this normally, use these.
+ */
+extern "C" int64_t art_d2l(double d) {
+ static const double kMaxLong = static_cast<double>(static_cast<int64_t>(0x7fffffffffffffffULL));
+ static const double kMinLong = static_cast<double>(static_cast<int64_t>(0x8000000000000000ULL));
+ if (d >= kMaxLong) {
+ return static_cast<int64_t>(0x7fffffffffffffffULL);
+ } else if (d <= kMinLong) {
+ return static_cast<int64_t>(0x8000000000000000ULL);
+ } else if (d != d) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int64_t>(d);
+ }
+}
+
+extern "C" int64_t art_f2l(float f) {
+ static const float kMaxLong = static_cast<float>(static_cast<int64_t>(0x7fffffffffffffffULL));
+ static const float kMinLong = static_cast<float>(static_cast<int64_t>(0x8000000000000000ULL));
+ if (f >= kMaxLong) {
+ return static_cast<int64_t>(0x7fffffffffffffffULL);
+ } else if (f <= kMinLong) {
+ return static_cast<int64_t>(0x8000000000000000ULL);
+ } else if (f != f) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int64_t>(f);
+ }
+}
+
+extern "C" int32_t art_d2i(double d) {
+ static const double kMaxInt = static_cast<double>(static_cast<int32_t>(0x7fffffffUL));
+ static const double kMinInt = static_cast<double>(static_cast<int32_t>(0x80000000UL));
+ if (d >= kMaxInt) {
+ return static_cast<int32_t>(0x7fffffffUL);
+ } else if (d <= kMinInt) {
+ return static_cast<int32_t>(0x80000000UL);
+ } else if (d != d) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int32_t>(d);
+ }
+}
+
+extern "C" int32_t art_f2i(float f) {
+ static const float kMaxInt = static_cast<float>(static_cast<int32_t>(0x7fffffffUL));
+ static const float kMinInt = static_cast<float>(static_cast<int32_t>(0x80000000UL));
+ if (f >= kMaxInt) {
+ return static_cast<int32_t>(0x7fffffffUL);
+ } else if (f <= kMinInt) {
+ return static_cast<int32_t>(0x80000000UL);
+ } else if (f != f) { // NaN case
+ return 0;
+ } else {
+ return static_cast<int32_t>(f);
+ }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/math_entrypoints.h b/runtime/entrypoints/math_entrypoints.h
new file mode 100644
index 0000000..717c734
--- /dev/null
+++ b/runtime/entrypoints/math_entrypoints.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
+
+#include <stdint.h>
+
+extern "C" double art_l2d(int64_t l);
+extern "C" float art_l2f(int64_t l);
+extern "C" int64_t art_d2l(double d);
+extern "C" int32_t art_d2i(double d);
+extern "C" int64_t art_f2l(float f);
+extern "C" int32_t art_f2i(float f);
+
+#endif // ART_RUNTIME_ENTRYPOINTS_MATH_ENTRYPOINTS_H_
diff --git a/runtime/runtime_support_test.cc b/runtime/entrypoints/math_entrypoints_test.cc
similarity index 89%
rename from runtime/runtime_support_test.cc
rename to runtime/entrypoints/math_entrypoints_test.cc
index b827813..ca8b931 100644
--- a/runtime/runtime_support_test.cc
+++ b/runtime/entrypoints/math_entrypoints_test.cc
@@ -14,16 +14,16 @@
* limitations under the License.
*/
-#include "runtime_support.h"
+#include "math_entrypoints.h"
#include "common_test.h"
#include <limits>
namespace art {
-class RuntimeSupportTest : public CommonTest {};
+class MathEntrypointsTest : public CommonTest {};
-TEST_F(RuntimeSupportTest, DoubleToLong) {
+TEST_F(MathEntrypointsTest, DoubleToLong) {
EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_d2l(1.85e19));
EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_d2l(-1.85e19));
EXPECT_EQ(0LL, art_d2l(0));
@@ -35,7 +35,7 @@
EXPECT_EQ(-100LL, art_d2l(-100.0));
}
-TEST_F(RuntimeSupportTest, FloatToLong) {
+TEST_F(MathEntrypointsTest, FloatToLong) {
EXPECT_EQ(std::numeric_limits<int64_t>::max(), art_f2l(1.85e19));
EXPECT_EQ(std::numeric_limits<int64_t>::min(), art_f2l(-1.85e19));
EXPECT_EQ(0LL, art_f2l(0));
@@ -47,7 +47,7 @@
EXPECT_EQ(-100LL, art_f2l(-100.0));
}
-TEST_F(RuntimeSupportTest, DoubleToInt) {
+TEST_F(MathEntrypointsTest, DoubleToInt) {
EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_d2i(4.3e9));
EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_d2i(-4.3e9));
EXPECT_EQ(0L, art_d2i(0));
@@ -59,7 +59,7 @@
EXPECT_EQ(-100L, art_d2i(-100.0));
}
-TEST_F(RuntimeSupportTest, FloatToInt) {
+TEST_F(MathEntrypointsTest, FloatToInt) {
EXPECT_EQ(std::numeric_limits<int32_t>::max(), art_f2i(4.3e9));
EXPECT_EQ(std::numeric_limits<int32_t>::min(), art_f2i(-4.3e9));
EXPECT_EQ(0L, art_f2i(0));
diff --git a/runtime/entrypoints/portable/portable_alloc_entrypoints.cc b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
new file mode 100644
index 0000000..2869269
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_alloc_entrypoints.cc
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" mirror::Object* art_portable_alloc_object_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocObjectFromCode(type_idx, referrer, thread, false);
+}
+
+extern "C" mirror::Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocObjectFromCode(type_idx, referrer, thread, true);
+}
+
+extern "C" mirror::Object* art_portable_alloc_array_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ uint32_t length,
+ Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocArrayFromCode(type_idx, referrer, length, self, false);
+}
+
+extern "C" mirror::Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ uint32_t length,
+ Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return AllocArrayFromCode(type_idx, referrer, length, self, true);
+}
+
+extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ uint32_t length,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false);
+}
+
+extern "C" mirror::Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ uint32_t length,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_argument_visitor.h b/runtime/entrypoints/portable/portable_argument_visitor.h
new file mode 100644
index 0000000..f268baf
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_argument_visitor.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_
+#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_
+
+#include "object_utils.h"
+
+namespace art {
+
+// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
+class PortableArgumentVisitor {
+ public:
+// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
+// Size of Runtime::kRefAndArgs callee save frame.
+// Size of Method* and register parameters in out stack arguments.
+#if defined(__arm__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
+#define PORTABLE_STACK_ARG_SKIP 0
+#elif defined(__mips__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
+#define PORTABLE_STACK_ARG_SKIP 16
+#elif defined(__i386__)
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
+#define PORTABLE_STACK_ARG_SKIP 4
+#else
+#error "Unsupported architecture"
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
+#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
+#define PORTABLE_STACK_ARG_SKIP 0
+#endif
+
+ PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ caller_mh_(caller_mh),
+ args_in_regs_(ComputeArgsInRegs(caller_mh)),
+ num_params_(caller_mh.NumArgs()),
+ reg_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+ stack_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+ + PORTABLE_STACK_ARG_SKIP),
+ cur_args_(reg_args_),
+ cur_arg_index_(0),
+ param_index_(0) {
+ }
+
+ virtual ~PortableArgumentVisitor() {}
+
+ virtual void Visit() = 0;
+
+ bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamAReference(param_index_);
+ }
+
+ bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamALongOrDouble(param_index_);
+ }
+
+ Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.GetParamPrimitiveType(param_index_);
+ }
+
+ byte* GetParamAddress() const {
+ return cur_args_ + (cur_arg_index_ * kPointerSize);
+ }
+
+ void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
+#if (defined(__arm__) || defined(__mips__))
+ if (IsParamALongOrDouble() && cur_arg_index_ == 2) {
+ break;
+ }
+#endif
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ cur_args_ = stack_args_;
+ cur_arg_index_ = 0;
+ while (param_index_ < num_params_) {
+#if (defined(__arm__) || defined(__mips__))
+ if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) {
+ cur_arg_index_++;
+ }
+#endif
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ }
+
+ private:
+ static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if (defined(__i386__))
+ return 0;
+#else
+ size_t args_in_regs = 0;
+ size_t num_params = mh.NumArgs();
+ for (size_t i = 0; i < num_params; i++) {
+ args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
+ if (args_in_regs > 3) {
+ args_in_regs = 3;
+ break;
+ }
+ }
+ return args_in_regs;
+#endif
+ }
+ MethodHelper& caller_mh_;
+ const size_t args_in_regs_;
+ const size_t num_params_;
+ byte* const reg_args_;
+ byte* const stack_args_;
+ byte* cur_args_;
+ size_t cur_arg_index_;
+ size_t param_index_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ARGUMENT_VISITOR_H_
diff --git a/runtime/entrypoints/portable/portable_cast_entrypoints.cc b/runtime/entrypoints/portable/portable_cast_entrypoints.cc
new file mode 100644
index 0000000..d343c5d
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_cast_entrypoints.cc
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "common_throws.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" int32_t art_portable_is_assignable_from_code(const mirror::Class* dest_type,
+ const mirror::Class* src_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(dest_type != NULL);
+ DCHECK(src_type != NULL);
+ return dest_type->IsAssignableFrom(src_type) ? 1 : 0;
+}
+
+extern "C" void art_portable_check_cast_from_code(const mirror::Class* dest_type,
+ const mirror::Class* src_type)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(dest_type->IsClass()) << PrettyClass(dest_type);
+ DCHECK(src_type->IsClass()) << PrettyClass(src_type);
+ if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) {
+ ThrowClassCastException(dest_type, src_type);
+ }
+}
+
+extern "C" void art_portable_check_put_array_element_from_code(const mirror::Object* element,
+ const mirror::Object* array)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (element == NULL) {
+ return;
+ }
+ DCHECK(array != NULL);
+ mirror::Class* array_class = array->GetClass();
+ DCHECK(array_class != NULL);
+ mirror::Class* component_type = array_class->GetComponentType();
+ mirror::Class* element_class = element->GetClass();
+ if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) {
+ ThrowArrayStoreException(element_class, array_class);
+ }
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc
new file mode 100644
index 0000000..bdab587
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_dexcache_entrypoints.cc
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "gc/accounting/card_table-inl.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" mirror::Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false);
+}
+
+extern "C" mirror::Object* art_portable_initialize_type_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false);
+}
+
+extern "C" mirror::Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Called when caller isn't guaranteed to have access to a type and the dex cache may be
+ // unpopulated
+ return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true);
+}
+
+extern "C" mirror::Object* art_portable_resolve_string_from_code(mirror::AbstractMethod* referrer,
+ uint32_t string_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return ResolveStringFromCode(referrer, string_idx);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_entrypoints.h b/runtime/entrypoints/portable/portable_entrypoints.h
new file mode 100644
index 0000000..a229c76
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_entrypoints.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
+
+#include "dex_file-inl.h"
+#include "runtime.h"
+
+namespace art {
+namespace mirror {
+ class AbstractMethod;
+ class Object;
+} // namespace mirror
+class Thread;
+
+#define PORTABLE_ENTRYPOINT_OFFSET(x) \
+ (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, portable_entrypoints_)) + \
+ static_cast<uintptr_t>(OFFSETOF_MEMBER(PortableEntryPoints, x)))
+
+// Pointers to functions that are called by code generated by compiler's adhering to the portable
+// compiler ABI.
+struct PACKED(4) PortableEntryPoints {
+ // Invocation
+ const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
+ mirror::AbstractMethod**, Thread*);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ENTRYPOINTS_PORTABLE_PORTABLE_ENTRYPOINTS_H_
diff --git a/runtime/entrypoints/portable/portable_field_entrypoints.cc b/runtime/entrypoints/portable/portable_field_entrypoints.cc
new file mode 100644
index 0000000..aa0f03c
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_field_entrypoints.cc
@@ -0,0 +1,241 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/field-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" int32_t art_portable_set32_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ int32_t new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx,
+ referrer,
+ StaticPrimitiveWrite,
+ sizeof(uint32_t));
+ if (LIKELY(field != NULL)) {
+ field->Set32(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx,
+ referrer,
+ Thread::Current(),
+ StaticPrimitiveWrite,
+ sizeof(uint32_t),
+ true);
+ if (LIKELY(field != NULL)) {
+ field->Set32(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_set64_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ int64_t new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
+ if (LIKELY(field != NULL)) {
+ field->Set64(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx,
+ referrer,
+ Thread::Current(),
+ StaticPrimitiveWrite,
+ sizeof(uint64_t),
+ true);
+ if (LIKELY(field != NULL)) {
+ field->Set64(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_set_obj_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ field->SetObj(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ StaticObjectWrite, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ field->SetObj(field->GetDeclaringClass(), new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_get32_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get32(field->GetDeclaringClass());
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ StaticPrimitiveRead, sizeof(uint32_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get32(field->GetDeclaringClass());
+ }
+ return 0;
+}
+
+extern "C" int64_t art_portable_get64_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get64(field->GetDeclaringClass());
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ StaticPrimitiveRead, sizeof(uint64_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get64(field->GetDeclaringClass());
+ }
+ return 0;
+}
+
+extern "C" mirror::Object* art_portable_get_obj_static_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(field->GetDeclaringClass());
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ StaticObjectRead, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(field->GetDeclaringClass());
+ }
+ return 0;
+}
+
+extern "C" int32_t art_portable_set32_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj, uint32_t new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t));
+ if (LIKELY(field != NULL)) {
+ field->Set32(obj, new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstancePrimitiveWrite, sizeof(uint32_t), true);
+ if (LIKELY(field != NULL)) {
+ field->Set32(obj, new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_set64_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj, int64_t new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t));
+ if (LIKELY(field != NULL)) {
+ field->Set64(obj, new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstancePrimitiveWrite, sizeof(uint64_t), true);
+ if (LIKELY(field != NULL)) {
+ field->Set64(obj, new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj,
+ mirror::Object* new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ field->SetObj(obj, new_value);
+ return 0;
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstanceObjectWrite, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ field->SetObj(obj, new_value);
+ return 0;
+ }
+ return -1;
+}
+
+extern "C" int32_t art_portable_get32_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get32(obj);
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstancePrimitiveRead, sizeof(uint32_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get32(obj);
+ }
+ return 0;
+}
+
+extern "C" int64_t art_portable_get64_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t));
+ if (LIKELY(field != NULL)) {
+ return field->Get64(obj);
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstancePrimitiveRead, sizeof(uint64_t), true);
+ if (LIKELY(field != NULL)) {
+ return field->Get64(obj);
+ }
+ return 0;
+}
+
+extern "C" mirror::Object* art_portable_get_obj_instance_from_code(uint32_t field_idx,
+ mirror::AbstractMethod* referrer,
+ mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead,
+ sizeof(mirror::Object*));
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(obj);
+ }
+ field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
+ InstanceObjectRead, sizeof(mirror::Object*), true);
+ if (LIKELY(field != NULL)) {
+ return field->GetObj(obj);
+ }
+ return 0;
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
new file mode 100644
index 0000000..771608b
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_instruction.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" void art_portable_fill_array_data_from_code(mirror::AbstractMethod* method,
+ uint32_t dex_pc,
+ mirror::Array* array,
+ uint32_t payload_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile::CodeItem* code_item = MethodHelper(method).GetCodeItem();
+ const Instruction::ArrayDataPayload* payload =
+ reinterpret_cast<const Instruction::ArrayDataPayload*>(code_item->insns_ + payload_offset);
+ DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
+ if (UNLIKELY(array == NULL)) {
+ ThrowNullPointerException(NULL, "null array in FILL_ARRAY_DATA");
+ return; // Error
+ }
+ DCHECK(array->IsArrayInstance() && !array->IsObjectArray());
+ if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
+ Thread* self = Thread::Current();
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ "failed FILL_ARRAY_DATA; length=%d, index=%d",
+ array->GetLength(), payload->element_count - 1);
+ return; // Error
+ }
+ uint32_t size_in_bytes = payload->element_count * payload->element_width;
+ memcpy(array->GetRawData(payload->element_width), payload->data, size_in_bytes);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
new file mode 100644
index 0000000..5911ba3
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/dex_cache-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+static mirror::AbstractMethod* FindMethodHelper(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* caller_method,
+ bool access_check,
+ InvokeType type,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::AbstractMethod* method = FindMethodFast(method_idx,
+ this_object,
+ caller_method,
+ access_check,
+ type);
+ if (UNLIKELY(method == NULL)) {
+ method = FindMethodFromCode(method_idx, this_object, caller_method,
+ thread, access_check, type);
+ if (UNLIKELY(method == NULL)) {
+ CHECK(thread->IsExceptionPending());
+      return NULL;  // failure
+ }
+ }
+ DCHECK(!thread->IsExceptionPending());
+ const void* code = method->GetEntryPointFromCompiledCode();
+
+ // When we return, the caller will branch to this address, so it had better not be 0!
+ if (UNLIKELY(code == NULL)) {
+ MethodHelper mh(method);
+ LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
+ << " location: " << mh.GetDexFile().GetLocation();
+ }
+ return method;
+}
+
+extern "C" mirror::Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread);
+}
+
+extern "C" mirror::Object* art_portable_find_interface_method_from_code(uint32_t method_idx,
+ mirror::Object* this_object,
+ mirror::AbstractMethod* referrer,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_jni_entrypoints.cc b/runtime/entrypoints/portable/portable_jni_entrypoints.cc
new file mode 100644
index 0000000..8df16ae
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_jni_entrypoints.cc
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "thread-inl.h"
+
+namespace art {
+
+// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_.
+extern "C" uint32_t art_portable_jni_method_start(Thread* self)
+    UNLOCK_FUNCTION(Locks::mutator_lock_) {
+ JNIEnvExt* env = self->GetJniEnv();
+ uint32_t saved_local_ref_cookie = env->local_ref_cookie;
+ env->local_ref_cookie = env->locals.GetSegmentState();
+ self->TransitionFromRunnableToSuspended(kNative);
+ return saved_local_ref_cookie;
+}
+
+extern "C" uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self)
+ UNLOCK_FUNCTION(Locks::mutator_lock_) {
+ self->DecodeJObject(to_lock)->MonitorEnter(self);
+ return art_portable_jni_method_start(self);
+}
+
+static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) {
+ JNIEnvExt* env = self->GetJniEnv();
+ env->locals.SetSegmentState(env->local_ref_cookie);
+ env->local_ref_cookie = saved_local_ref_cookie;
+}
+
+extern "C" void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ PopLocalReferences(saved_local_ref_cookie, self);
+}
+
+
+extern "C" void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie,
+ jobject locked,
+ Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
+ PopLocalReferences(saved_local_ref_cookie, self);
+}
+
+extern "C" mirror::Object* art_portable_jni_method_end_with_reference(jobject result,
+ uint32_t saved_local_ref_cookie,
+ Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ mirror::Object* o = self->DecodeJObject(result); // Must decode before pop.
+ PopLocalReferences(saved_local_ref_cookie, self);
+ // Process result.
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) {
+ if (self->IsExceptionPending()) {
+ return NULL;
+ }
+ CheckReferenceResult(o, self);
+ }
+ return o;
+}
+
+extern "C" mirror::Object* art_portable_jni_method_end_with_reference_synchronized(jobject result,
+ uint32_t saved_local_ref_cookie,
+ jobject locked,
+ Thread* self)
+ SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
+ self->TransitionFromSuspendedToRunnable();
+ UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
+ mirror::Object* o = self->DecodeJObject(result);
+ PopLocalReferences(saved_local_ref_cookie, self);
+ // Process result.
+ if (UNLIKELY(self->GetJniEnv()->check_jni)) {
+ if (self->IsExceptionPending()) {
+ return NULL;
+ }
+ CheckReferenceResult(o, self);
+ }
+ return o;
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_lock_entrypoints.cc b/runtime/entrypoints/portable/portable_lock_entrypoints.cc
new file mode 100644
index 0000000..44d3da9
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_lock_entrypoints.cc
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" void art_portable_lock_object_from_code(mirror::Object* obj, Thread* thread)
+ EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) {
+ DCHECK(obj != NULL); // Assumed to have been checked before entry.
+ obj->MonitorEnter(thread); // May block.
+ DCHECK(thread->HoldsLock(obj));
+ // Only possible exception is NPE and is handled before entry.
+ DCHECK(!thread->IsExceptionPending());
+}
+
+extern "C" void art_portable_unlock_object_from_code(mirror::Object* obj, Thread* thread)
+ UNLOCK_FUNCTION(monitor_lock_) {
+ DCHECK(obj != NULL); // Assumed to have been checked before entry.
+ // MonitorExit may throw exception.
+ obj->MonitorExit(thread);
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_proxy_entrypoints.cc b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc
new file mode 100644
index 0000000..3db39cd
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_proxy_entrypoints.cc
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+#include "portable_argument_visitor.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
+// to jobjects.
+class BuildPortableArgumentVisitor : public PortableArgumentVisitor {
+ public:
+ BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
+ ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
+ PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
+
+ virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jvalue val;
+ Primitive::Type type = GetParamPrimitiveType();
+ switch (type) {
+ case Primitive::kPrimNot: {
+ mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
+ val.l = soa_.AddLocalReference<jobject>(obj);
+ break;
+ }
+ case Primitive::kPrimLong: // Fall-through.
+ case Primitive::kPrimDouble:
+ val.j = *reinterpret_cast<jlong*>(GetParamAddress());
+ break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimByte: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
+ case Primitive::kPrimShort: // Fall-through.
+ case Primitive::kPrimInt: // Fall-through.
+ case Primitive::kPrimFloat:
+ val.i = *reinterpret_cast<jint*>(GetParamAddress());
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "UNREACHABLE";
+ val.j = 0;
+ break;
+ }
+ args_.push_back(val);
+ }
+
+ private:
+ ScopedObjectAccessUnchecked& soa_;
+ std::vector<jvalue>& args_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor);
+};
+
+// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
+// which is responsible for recording callee save registers. We explicitly place into jobjects the
+// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
+// field within the proxy object, which will box the primitive arguments and deal with error cases.
+extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
+ mirror::Object* receiver,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
+ const char* old_cause =
+ self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
+ self->VerifyStack();
+ // Start new JNI local reference state.
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedObjectAccessUnchecked soa(env);
+ ScopedJniEnvLocalRefState env_state(env);
+ // Create local ref. copies of proxy method and the receiver.
+ jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
+
+ // Placing arguments into args vector and remove the receiver.
+ MethodHelper proxy_mh(proxy_method);
+ std::vector<jvalue> args;
+ BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
+ local_ref_visitor.VisitArguments();
+ args.erase(args.begin());
+
+ // Convert proxy method into expected interface method.
+ mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
+ DCHECK(interface_method != NULL);
+ DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
+ jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
+
+ // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
+ // that performs allocations.
+ self->EndAssertNoThreadSuspension(old_cause);
+ JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
+ rcvr_jobj, interface_method_jobj, args);
+ return result.GetJ();
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_stub_entrypoints.cc b/runtime/entrypoints/portable/portable_stub_entrypoints.cc
new file mode 100644
index 0000000..c510c65
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_stub_entrypoints.cc
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dex_instruction-inl.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+// Lazily resolve a method for portable. Called by stub code.
+extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
+ mirror::Object* receiver,
+ mirror::AbstractMethod** called_addr,
+ Thread* thread)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t dex_pc;
+ mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc);
+
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ InvokeType invoke_type;
+ bool is_range;
+ if (called->IsRuntimeMethod()) {
+ const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
+ CHECK_LT(dex_pc, code->insns_size_in_code_units_);
+ const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
+ Instruction::Code instr_code = instr->Opcode();
+ switch (instr_code) {
+ case Instruction::INVOKE_DIRECT:
+ invoke_type = kDirect;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_DIRECT_RANGE:
+ invoke_type = kDirect;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_STATIC:
+ invoke_type = kStatic;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_STATIC_RANGE:
+ invoke_type = kStatic;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_SUPER:
+ invoke_type = kSuper;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_SUPER_RANGE:
+ invoke_type = kSuper;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_VIRTUAL:
+ invoke_type = kVirtual;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_VIRTUAL_RANGE:
+ invoke_type = kVirtual;
+ is_range = true;
+ break;
+ case Instruction::INVOKE_INTERFACE:
+ invoke_type = kInterface;
+ is_range = false;
+ break;
+ case Instruction::INVOKE_INTERFACE_RANGE:
+ invoke_type = kInterface;
+ is_range = true;
+ break;
+ default:
+ LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
+ // Avoid used uninitialized warnings.
+ invoke_type = kDirect;
+ is_range = true;
+ }
+ uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
+ called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+ // Refine called method based on receiver.
+ if (invoke_type == kVirtual) {
+ called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+ } else if (invoke_type == kInterface) {
+ called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+ }
+ } else {
+ CHECK(called->IsStatic()) << PrettyMethod(called);
+ invoke_type = kStatic;
+ }
+ const void* code = NULL;
+ if (LIKELY(!thread->IsExceptionPending())) {
+ // Incompatible class change should have been handled in resolve method.
+ CHECK(!called->CheckIncompatibleClassChange(invoke_type));
+ // Ensure that the called method's class is initialized.
+ mirror::Class* called_class = called->GetDeclaringClass();
+ linker->EnsureInitialized(called_class, true, true);
+ if (LIKELY(called_class->IsInitialized())) {
+ code = called->GetEntryPointFromCompiledCode();
+ // TODO: remove this after we solve the link issue.
+ { // for lazy link.
+ if (code == NULL) {
+ code = linker->GetOatCodeFor(called);
+ }
+ }
+ } else if (called_class->IsInitializing()) {
+ if (invoke_type == kStatic) {
+ // Class is still initializing, go to oat and grab code (trampoline must be left in place
+ // until class is initialized to stop races between threads).
+ code = linker->GetOatCodeFor(called);
+ } else {
+ // No trampoline for non-static methods.
+ code = called->GetEntryPointFromCompiledCode();
+ // TODO: remove this after we solve the link issue.
+ { // for lazy link.
+ if (code == NULL) {
+ code = linker->GetOatCodeFor(called);
+ }
+ }
+ }
+ } else {
+ DCHECK(called_class->IsErroneous());
+ }
+ }
+ if (LIKELY(code != NULL)) {
+ // Expect class to at least be initializing.
+ DCHECK(called->GetDeclaringClass()->IsInitializing());
+ // Don't want infinite recursion.
+ DCHECK(code != GetResolutionTrampoline(linker));
+ // Set up entry into main method
+ *called_addr = called;
+ }
+ return code;
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_thread_entrypoints.cc b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
new file mode 100644
index 0000000..dac7388
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_thread_entrypoints.cc
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method.h"
+#include "mirror/object-inl.h"
+#include "verifier/dex_gc_map.h"
+#include "stack.h"
+
+namespace art {
+
+class ShadowFrameCopyVisitor : public StackVisitor {
+ public:
+ explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL),
+ top_frame_(NULL) {}
+
+ bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (IsShadowFrame()) {
+ ShadowFrame* cur_frame = GetCurrentShadowFrame();
+ size_t num_regs = cur_frame->NumberOfVRegs();
+ mirror::AbstractMethod* method = cur_frame->GetMethod();
+ uint32_t dex_pc = cur_frame->GetDexPC();
+ ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc);
+
+ const uint8_t* gc_map = method->GetNativeGcMap();
+ uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
+ (gc_map[1] << 16) |
+ (gc_map[2] << 8) |
+ (gc_map[3] << 0));
+ verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
+ const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
+ for (size_t reg = 0; reg < num_regs; ++reg) {
+ if (TestBitmap(reg, reg_bitmap)) {
+ new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg));
+ } else {
+ new_frame->SetVReg(reg, cur_frame->GetVReg(reg));
+ }
+ }
+
+ if (prev_frame_ != NULL) {
+ prev_frame_->SetLink(new_frame);
+ } else {
+ top_frame_ = new_frame;
+ }
+ prev_frame_ = new_frame;
+ }
+ return true;
+ }
+
+ ShadowFrame* GetShadowFrameCopy() {
+ return top_frame_;
+ }
+
+ private:
+ static bool TestBitmap(int reg, const uint8_t* reg_vector) {
+ return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
+ }
+
+ ShadowFrame* prev_frame_;
+ ShadowFrame* top_frame_;
+};
+
+extern "C" void art_portable_test_suspend_from_code(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CheckSuspend(self);
+ if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) {
+ // Save out the shadow frame to the heap
+ ShadowFrameCopyVisitor visitor(self);
+ visitor.WalkStack(true);
+ self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy());
+ self->SetDeoptimizationReturnValue(JValue());
+ self->SetException(ThrowLocation(), reinterpret_cast<mirror::Throwable*>(-1));
+ }
+}
+
+extern "C" ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread,
+ ShadowFrame* new_shadow_frame,
+ mirror::AbstractMethod* method,
+ uint32_t num_vregs) {
+ ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame);
+ new_shadow_frame->SetMethod(method);
+ new_shadow_frame->SetNumberOfVRegs(num_vregs);
+ return old_frame;
+}
+
+} // namespace art
diff --git a/runtime/entrypoints/portable/portable_throw_entrypoints.cc b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
new file mode 100644
index 0000000..4b2b46b
--- /dev/null
+++ b/runtime/entrypoints/portable/portable_throw_entrypoints.cc
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object-inl.h"
+
+namespace art {
+
+extern "C" void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ThrowArithmeticExceptionDivideByZero();
+}
+
+extern "C" void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ThrowArrayIndexOutOfBoundsException(index, length);
+}
+
+extern "C" void art_portable_throw_no_such_method_from_code(int32_t method_idx)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ThrowNoSuchMethodError(method_idx);
+}
+
+extern "C" void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // TODO: remove dex_pc argument from caller.
+ UNUSED(dex_pc);
+ Thread* self = Thread::Current();
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ ThrowNullPointerExceptionFromDexPC(throw_location);
+}
+
+extern "C" void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ThrowStackOverflowError(Thread::Current());
+}
+
+extern "C" void art_portable_throw_exception_from_code(mirror::Throwable* exception)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current();
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ if (exception == NULL) {
+ ThrowNullPointerException(NULL, "throw with null exception");
+ } else {
+ self->SetException(throw_location, exception);
+ }
+}
+
+extern "C" void* art_portable_get_and_clear_exception(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK(self->IsExceptionPending());
+ // TODO: make this inline.
+ mirror::Throwable* exception = self->GetException(NULL);
+ self->ClearException();
+ return exception;
+}
+
+extern "C" int32_t art_portable_find_catch_block_from_code(mirror::AbstractMethod* current_method,
+ uint32_t ti_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Thread* self = Thread::Current(); // TODO: make an argument.
+ ThrowLocation throw_location;
+ mirror::Throwable* exception = self->GetException(&throw_location);
+ // Check for special deoptimization exception.
+  if (UNLIKELY(reinterpret_cast<intptr_t>(exception) == -1)) {
+ return -1;
+ }
+ mirror::Class* exception_type = exception->GetClass();
+ MethodHelper mh(current_method);
+ const DexFile::CodeItem* code_item = mh.GetCodeItem();
+ DCHECK_LT(ti_offset, code_item->tries_size_);
+ const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset);
+
+ int iter_index = 0;
+ int result = -1;
+ uint32_t catch_dex_pc = -1;
+ // Iterate over the catch handlers associated with dex_pc
+ for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) {
+ uint16_t iter_type_idx = it.GetHandlerTypeIndex();
+ // Catch all case
+ if (iter_type_idx == DexFile::kDexNoIndex16) {
+ catch_dex_pc = it.GetHandlerAddress();
+ result = iter_index;
+ break;
+ }
+ // Does this catch exception type apply?
+ mirror::Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx);
+ if (UNLIKELY(iter_exception_type == NULL)) {
+ // TODO: check, the verifier (class linker?) should take care of resolving all exception
+ // classes early.
+ LOG(WARNING) << "Unresolved exception class when finding catch block: "
+ << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx);
+ } else if (iter_exception_type->IsAssignableFrom(exception_type)) {
+ catch_dex_pc = it.GetHandlerAddress();
+ result = iter_index;
+ break;
+ }
+ ++iter_index;
+ }
+ if (result != -1) {
+ // Handler found.
+ Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self,
+ throw_location,
+ current_method,
+ catch_dex_pc,
+ exception);
+ }
+ return result;
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
similarity index 87%
rename from runtime/oat/runtime/callee_save_frame.h
rename to runtime/entrypoints/quick/callee_save_frame.h
index 59f46ac..0cb578d 100644
--- a/runtime/oat/runtime/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_
-#define ART_RUNTIME_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
#include "base/mutex.h"
#include "thread-inl.h"
@@ -38,4 +38,4 @@
} // namespace art
-#endif // ART_RUNTIME_OAT_RUNTIME_CALLEE_SAVE_FRAME_H_
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_CALLEE_SAVE_FRAME_H_
diff --git a/runtime/oat/runtime/support_alloc.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
similarity index 98%
rename from runtime/oat/runtime/support_alloc.cc
rename to runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index f66fc84..9ed802a 100644
--- a/runtime/oat/runtime/support_alloc.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -15,11 +15,11 @@
*/
#include "callee_save_frame.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/class-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#include "runtime_support.h"
namespace art {
diff --git a/runtime/entrypoints/quick/quick_argument_visitor.h b/runtime/entrypoints/quick/quick_argument_visitor.h
new file mode 100644
index 0000000..35fa972
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_argument_visitor.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_
+
+#include "object_utils.h"
+
+namespace art {
+
+// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
+class QuickArgumentVisitor {
+ public:
+// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
+// Size of Runtime::kRefAndArgs callee save frame.
+// Size of Method* and register parameters in out stack arguments.
+#if defined(__arm__)
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
+#define QUICK_STACK_ARG_SKIP 16
+#elif defined(__mips__)
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
+#define QUICK_STACK_ARG_SKIP 16
+#elif defined(__i386__)
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
+#define QUICK_STACK_ARG_SKIP 16
+#else
+#error "Unsupported architecture"
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
+#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
+#define QUICK_STACK_ARG_SKIP 0
+#endif
+
+ QuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
+ caller_mh_(caller_mh),
+ args_in_regs_(ComputeArgsInRegs(caller_mh)),
+ num_params_(caller_mh.NumArgs()),
+ reg_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
+ stack_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
+ + QUICK_STACK_ARG_SKIP),
+ cur_args_(reg_args_),
+ cur_arg_index_(0),
+ param_index_(0),
+ is_split_long_or_double_(false) {
+ }
+
+ virtual ~QuickArgumentVisitor() {}
+
+ virtual void Visit() = 0;
+
+ bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamAReference(param_index_);
+ }
+
+ bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.IsParamALongOrDouble(param_index_);
+ }
+
+ Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return caller_mh_.GetParamPrimitiveType(param_index_);
+ }
+
+ byte* GetParamAddress() const {
+ return cur_args_ + (cur_arg_index_ * kPointerSize);
+ }
+
+ bool IsSplitLongOrDouble() const {
+ return is_split_long_or_double_;
+ }
+
+ uint64_t ReadSplitLongParam() const {
+ DCHECK(IsSplitLongOrDouble());
+ uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
+ uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
+ return (low_half & 0xffffffffULL) | (high_half << 32);
+ }
+
+ void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
+ is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble();
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ cur_args_ = stack_args_;
+ cur_arg_index_ = is_split_long_or_double_ ? 1 : 0;
+ is_split_long_or_double_ = false;
+ while (param_index_ < num_params_) {
+ Visit();
+ cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
+ param_index_++;
+ }
+ }
+
+ private:
+ static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ size_t args_in_regs = 0;
+ size_t num_params = mh.NumArgs();
+ for (size_t i = 0; i < num_params; i++) {
+ args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
+ if (args_in_regs > 3) {
+ args_in_regs = 3;
+ break;
+ }
+ }
+ return args_in_regs;
+ }
+ MethodHelper& caller_mh_;
+ const size_t args_in_regs_;
+ const size_t num_params_;
+ byte* const reg_args_;
+ byte* const stack_args_;
+ byte* cur_args_;
+ size_t cur_arg_index_;
+ size_t param_index_;
+ // Does a 64bit parameter straddle the register and stack arguments?
+ bool is_split_long_or_double_;
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ARGUMENT_VISITOR_H_
diff --git a/runtime/oat/runtime/support_cast.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
similarity index 97%
rename from runtime/oat/runtime/support_cast.cc
rename to runtime/entrypoints/quick/quick_cast_entrypoints.cc
index fe91e61..b810bb7 100644
--- a/runtime/oat/runtime/support_cast.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -15,10 +15,10 @@
*/
#include "callee_save_frame.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "runtime_support.h"
namespace art {
diff --git a/runtime/oat/runtime/support_deoptimize.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
similarity index 100%
rename from runtime/oat/runtime/support_deoptimize.cc
rename to runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
diff --git a/runtime/oat/runtime/support_dexcache.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
similarity index 97%
rename from runtime/oat/runtime/support_dexcache.cc
rename to runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index 0af7a62..6400161b 100644
--- a/runtime/oat/runtime/support_dexcache.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -15,13 +15,13 @@
*/
#include "callee_save_frame.h"
-#include "gc/accounting/card_table-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
+#include "gc/accounting/card_table-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#include "runtime_support.h"
namespace art {
diff --git a/runtime/oat/runtime/oat_support_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
similarity index 88%
rename from runtime/oat/runtime/oat_support_entrypoints.h
rename to runtime/entrypoints/quick/quick_entrypoints.h
index 546ee01..74b8cfd 100644
--- a/runtime/oat/runtime/oat_support_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -14,28 +14,29 @@
* limitations under the License.
*/
-#ifndef ART_RUNTIME_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
-#define ART_RUNTIME_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
+#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
+#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
#include "dex_file-inl.h"
#include "runtime.h"
-#define ENTRYPOINT_OFFSET(x) \
- (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, entrypoints_)) + \
- static_cast<uintptr_t>(OFFSETOF_MEMBER(EntryPoints, x)))
+#define QUICK_ENTRYPOINT_OFFSET(x) \
+ (static_cast<uintptr_t>(OFFSETOF_MEMBER(Thread, quick_entrypoints_)) + \
+ static_cast<uintptr_t>(OFFSETOF_MEMBER(QuickEntryPoints, x)))
namespace art {
namespace mirror {
-class AbstractMethod;
-class Class;
-class Object;
+ class AbstractMethod;
+ class Class;
+ class Object;
} // namespace mirror
class DvmDex;
class MethodHelper;
class ShadowFrame;
class Thread;
-struct PACKED(4) EntryPoints {
+// Pointers to functions that are called by quick compiler generated code via thread-local storage.
+struct PACKED(4) QuickEntryPoints {
// Alloc
void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t);
void* (*pAllocArrayFromCodeWithAccessCheck)(uint32_t, void*, int32_t);
@@ -122,8 +123,6 @@
void* (*pMemcpy)(void*, const void*, size_t);
// Invocation
- const void* (*pPortableResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
- mirror::AbstractMethod**, Thread*);
const void* (*pQuickResolutionTrampolineFromCode)(mirror::AbstractMethod*, mirror::Object*,
mirror::AbstractMethod**, Thread*);
void (*pInvokeDirectTrampolineWithAccessCheck)(uint32_t, void*);
@@ -166,12 +165,6 @@
jobject locked, Thread* self)
SHARED_LOCK_FUNCTION(Locks::mutator_lock_) HOT_ATTR;
-// Initialize an entry point data structure.
-void InitEntryPoints(EntryPoints* points);
-
-// Change the debugger entry point in the data structure.
-void ChangeDebuggerEntryPoint(EntryPoints* points, bool enabled);
-
} // namespace art
-#endif // ART_RUNTIME_OAT_RUNTIME_OAT_SUPPORT_ENTRYPOINTS_H_
+#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_ENTRYPOINTS_H_
diff --git a/runtime/oat/runtime/support_field.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
similarity index 98%
rename from runtime/oat/runtime/support_field.cc
rename to runtime/entrypoints/quick/quick_field_entrypoints.cc
index c20326c..a4e9dc9 100644
--- a/runtime/oat/runtime/support_field.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -16,10 +16,10 @@
#include "callee_save_frame.h"
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/field-inl.h"
-#include "runtime_support.h"
#include <stdint.h>
diff --git a/runtime/oat/runtime/support_fillarray.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
similarity index 98%
rename from runtime/oat/runtime/support_fillarray.cc
rename to runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index a0b06fb..b81ad12 100644
--- a/runtime/oat/runtime/support_fillarray.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -52,7 +52,7 @@
ThrowLocation throw_location = self->GetCurrentLocationForThrow();
self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
"failed FILL_ARRAY_DATA; length=%d, index=%d",
- array->GetLength(), payload->element_count);
+ array->GetLength(), payload->element_count - 1);
return -1; // Error
}
uint32_t size_in_bytes = payload->element_count * payload->element_width;
diff --git a/runtime/oat/runtime/support_instrumentation.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
similarity index 100%
rename from runtime/oat/runtime/support_instrumentation.cc
rename to runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
diff --git a/runtime/oat/runtime/support_interpreter.cc b/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc
similarity index 98%
rename from runtime/oat/runtime/support_interpreter.cc
rename to runtime/entrypoints/quick/quick_interpreter_entrypoints.cc
index 78b7e10..656df8d 100644
--- a/runtime/oat/runtime/support_interpreter.cc
+++ b/runtime/entrypoints/quick/quick_interpreter_entrypoints.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "argument_visitor.h"
+#include "quick_argument_visitor.h"
#include "callee_save_frame.h"
#include "dex_file-inl.h"
#include "interpreter/interpreter.h"
diff --git a/runtime/oat/runtime/support_invoke.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
similarity index 98%
rename from runtime/oat/runtime/support_invoke.cc
rename to runtime/entrypoints/quick/quick_invoke_entrypoints.cc
index 6a95f3c..53b3628 100644
--- a/runtime/oat/runtime/support_invoke.cc
+++ b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
@@ -16,12 +16,12 @@
#include "callee_save_frame.h"
#include "dex_instruction-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
-#include "runtime_support.h"
namespace art {
diff --git a/runtime/oat/runtime/support_jni.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
similarity index 98%
rename from runtime/oat/runtime/support_jni.cc
rename to runtime/entrypoints/quick/quick_jni_entrypoints.cc
index 2d31160..23a28f9 100644
--- a/runtime/oat/runtime/support_jni.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -15,13 +15,13 @@
*/
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/class-inl.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
-#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
diff --git a/runtime/oat/runtime/support_locks.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
similarity index 100%
rename from runtime/oat/runtime/support_locks.cc
rename to runtime/entrypoints/quick/quick_lock_entrypoints.cc
diff --git a/runtime/oat/runtime/support_math.cc b/runtime/entrypoints/quick/quick_math_entrypoints.cc
similarity index 100%
rename from runtime/oat/runtime/support_math.cc
rename to runtime/entrypoints/quick/quick_math_entrypoints.cc
diff --git a/runtime/entrypoints/quick/quick_proxy_entrypoints.cc b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc
new file mode 100644
index 0000000..4e3d749
--- /dev/null
+++ b/runtime/entrypoints/quick/quick_proxy_entrypoints.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "quick_argument_visitor.h"
+#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
+#include "mirror/abstract_method-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/object-inl.h"
+#include "object_utils.h"
+#include "reflection.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+#include "well_known_classes.h"
+
+#include "ScopedLocalRef.h"
+
+namespace art {
+
+// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
+// to jobjects.
+class BuildQuickArgumentVisitor : public QuickArgumentVisitor {
+ public:
+ BuildQuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
+ ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
+ QuickArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
+
+ virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ jvalue val;
+ Primitive::Type type = GetParamPrimitiveType();
+ switch (type) {
+ case Primitive::kPrimNot: {
+ mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
+ val.l = soa_.AddLocalReference<jobject>(obj);
+ break;
+ }
+ case Primitive::kPrimLong: // Fall-through.
+ case Primitive::kPrimDouble:
+ if (IsSplitLongOrDouble()) {
+ val.j = ReadSplitLongParam();
+ } else {
+ val.j = *reinterpret_cast<jlong*>(GetParamAddress());
+ }
+ break;
+ case Primitive::kPrimBoolean: // Fall-through.
+ case Primitive::kPrimByte: // Fall-through.
+ case Primitive::kPrimChar: // Fall-through.
+ case Primitive::kPrimShort: // Fall-through.
+ case Primitive::kPrimInt: // Fall-through.
+ case Primitive::kPrimFloat:
+ val.i = *reinterpret_cast<jint*>(GetParamAddress());
+ break;
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "UNREACHABLE";
+ val.j = 0;
+ break;
+ }
+ args_.push_back(val);
+ }
+
+ private:
+ ScopedObjectAccessUnchecked& soa_;
+ std::vector<jvalue>& args_;
+
+ DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
+};
+
+// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
+// which is responsible for recording callee save registers. We explicitly place into jobjects the
+// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
+// field within the proxy object, which will box the primitive arguments and deal with error cases.
+extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
+ mirror::Object* receiver,
+ Thread* self, mirror::AbstractMethod** sp)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
+ const char* old_cause =
+ self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
+ // Register the top of the managed stack, making stack crawlable.
+ DCHECK_EQ(*sp, proxy_method);
+ self->SetTopOfStack(sp, 0);
+ DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
+ self->VerifyStack();
+ // Start new JNI local reference state.
+ JNIEnvExt* env = self->GetJniEnv();
+ ScopedObjectAccessUnchecked soa(env);
+ ScopedJniEnvLocalRefState env_state(env);
+ // Create local ref. copies of proxy method and the receiver.
+ jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
+
+ // Placing arguments into args vector and remove the receiver.
+ MethodHelper proxy_mh(proxy_method);
+ std::vector<jvalue> args;
+ BuildQuickArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
+ local_ref_visitor.VisitArguments();
+ args.erase(args.begin());
+
+ // Convert proxy method into expected interface method.
+ mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
+ DCHECK(interface_method != NULL);
+ DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
+ jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
+
+ // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
+ // that performs allocations.
+ self->EndAssertNoThreadSuspension(old_cause);
+ JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
+ rcvr_jobj, interface_method_jobj, args);
+ return result.GetJ();
+}
+
+} // namespace art
diff --git a/runtime/oat/runtime/support_stubs.cc b/runtime/entrypoints/quick/quick_stub_entrypoints.cc
similarity index 66%
rename from runtime/oat/runtime/support_stubs.cc
rename to runtime/entrypoints/quick/quick_stub_entrypoints.cc
index f2af6d2..d78bbf3 100644
--- a/runtime/oat/runtime/support_stubs.cc
+++ b/runtime/entrypoints/quick/quick_stub_entrypoints.cc
@@ -30,127 +30,6 @@
namespace art {
-// Lazily resolve a method for portable. Called by stub code.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
- mirror::Object* receiver,
- mirror::AbstractMethod** called_addr,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t dex_pc;
- mirror::AbstractMethod* caller = thread->GetCurrentMethod(&dex_pc);
-
- ClassLinker* linker = Runtime::Current()->GetClassLinker();
- InvokeType invoke_type;
- bool is_range;
- if (called->IsRuntimeMethod()) {
- const DexFile::CodeItem* code = MethodHelper(caller).GetCodeItem();
- CHECK_LT(dex_pc, code->insns_size_in_code_units_);
- const Instruction* instr = Instruction::At(&code->insns_[dex_pc]);
- Instruction::Code instr_code = instr->Opcode();
- switch (instr_code) {
- case Instruction::INVOKE_DIRECT:
- invoke_type = kDirect;
- is_range = false;
- break;
- case Instruction::INVOKE_DIRECT_RANGE:
- invoke_type = kDirect;
- is_range = true;
- break;
- case Instruction::INVOKE_STATIC:
- invoke_type = kStatic;
- is_range = false;
- break;
- case Instruction::INVOKE_STATIC_RANGE:
- invoke_type = kStatic;
- is_range = true;
- break;
- case Instruction::INVOKE_SUPER:
- invoke_type = kSuper;
- is_range = false;
- break;
- case Instruction::INVOKE_SUPER_RANGE:
- invoke_type = kSuper;
- is_range = true;
- break;
- case Instruction::INVOKE_VIRTUAL:
- invoke_type = kVirtual;
- is_range = false;
- break;
- case Instruction::INVOKE_VIRTUAL_RANGE:
- invoke_type = kVirtual;
- is_range = true;
- break;
- case Instruction::INVOKE_INTERFACE:
- invoke_type = kInterface;
- is_range = false;
- break;
- case Instruction::INVOKE_INTERFACE_RANGE:
- invoke_type = kInterface;
- is_range = true;
- break;
- default:
- LOG(FATAL) << "Unexpected call into trampoline: " << instr->DumpString(NULL);
- // Avoid used uninitialized warnings.
- invoke_type = kDirect;
- is_range = true;
- }
- uint32_t dex_method_idx = (is_range) ? instr->VRegB_3rc() : instr->VRegB_35c();
- called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
- // Refine called method based on receiver.
- if (invoke_type == kVirtual) {
- called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
- } else if (invoke_type == kInterface) {
- called = receiver->GetClass()->FindVirtualMethodForInterface(called);
- }
- } else {
- CHECK(called->IsStatic()) << PrettyMethod(called);
- invoke_type = kStatic;
- }
- const void* code = NULL;
- if (LIKELY(!thread->IsExceptionPending())) {
- // Incompatible class change should have been handled in resolve method.
- CHECK(!called->CheckIncompatibleClassChange(invoke_type));
- // Ensure that the called method's class is initialized.
- mirror::Class* called_class = called->GetDeclaringClass();
- linker->EnsureInitialized(called_class, true, true);
- if (LIKELY(called_class->IsInitialized())) {
- code = called->GetEntryPointFromCompiledCode();
- // TODO: remove this after we solve the link issue.
- { // for lazy link.
- if (code == NULL) {
- code = linker->GetOatCodeFor(called);
- }
- }
- } else if (called_class->IsInitializing()) {
- if (invoke_type == kStatic) {
- // Class is still initializing, go to oat and grab code (trampoline must be left in place
- // until class is initialized to stop races between threads).
- code = linker->GetOatCodeFor(called);
- } else {
- // No trampoline for non-static methods.
- code = called->GetEntryPointFromCompiledCode();
- // TODO: remove this after we solve the link issue.
- { // for lazy link.
- if (code == NULL) {
- code = linker->GetOatCodeFor(called);
- }
- }
- }
- } else {
- DCHECK(called_class->IsErroneous());
- }
- }
- if (LIKELY(code != NULL)) {
- // Expect class to at least be initializing.
- DCHECK(called->GetDeclaringClass()->IsInitializing());
- // Don't want infinite recursion.
- DCHECK(code != GetResolutionTrampoline(linker));
- // Set up entry into main method
- *called_addr = called;
- }
- return code;
-}
-
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
mirror::Object* receiver,
@@ -413,26 +292,4 @@
self->QuickDeliverException();
}
-// Used by the JNI dlsym stub to find the native method to invoke if none is registered.
-extern "C" void* artFindNativeMethod(Thread* self) {
- Locks::mutator_lock_->AssertNotHeld(self); // We come here as Native.
- DCHECK(Thread::Current() == self);
- ScopedObjectAccess soa(self);
-
- mirror::AbstractMethod* method = self->GetCurrentMethod(NULL);
- DCHECK(method != NULL);
-
- // Lookup symbol address for method, on failure we'll return NULL with an
- // exception set, otherwise we return the address of the method we found.
- void* native_code = soa.Vm()->FindCodeForNativeMethod(method);
- if (native_code == NULL) {
- DCHECK(self->IsExceptionPending());
- return NULL;
- } else {
- // Register so that future calls don't come here
- method->RegisterNative(self, native_code);
- return native_code;
- }
-}
-
} // namespace art
diff --git a/runtime/oat/runtime/support_thread.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
similarity index 95%
rename from runtime/oat/runtime/support_thread.cc
rename to runtime/entrypoints/quick/quick_thread_entrypoints.cc
index e711714..b4d6c0b 100644
--- a/runtime/oat/runtime/support_thread.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -15,7 +15,7 @@
*/
#include "callee_save_frame.h"
-#include "runtime_support.h"
+#include "entrypoints/entrypoint_utils.h"
#include "thread.h"
#include "thread_list.h"
diff --git a/runtime/oat/runtime/support_throw.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
similarity index 98%
rename from runtime/oat/runtime/support_throw.cc
rename to runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 9588698..3bfa2f2 100644
--- a/runtime/oat/runtime/support_throw.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -15,9 +15,9 @@
*/
#include "callee_save_frame.h"
+#include "entrypoints/entrypoint_utils.h"
#include "mirror/object.h"
#include "object_utils.h"
-#include "runtime_support.h"
#include "thread.h"
#include "well_known_classes.h"
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 92d9ea2..a732566 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -47,7 +47,7 @@
DCHECK(begin_ != NULL);
front_index_ = 0;
back_index_ = 0;
- is_sorted_ = true;
+ debug_is_sorted_ = true;
int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
if (result == -1) {
PLOG(WARNING) << "madvise failed";
@@ -58,8 +58,10 @@
// Returns false if we overflowed the stack.
bool AtomicPushBack(const T& value) {
+ if (kIsDebugBuild) {
+ debug_is_sorted_ = false;
+ }
int32_t index;
- is_sorted_ = false;
do {
index = back_index_;
if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
@@ -72,7 +74,9 @@
}
void PushBack(const T& value) {
- is_sorted_ = false;
+ if (kIsDebugBuild) {
+ debug_is_sorted_ = false;
+ }
int32_t index = back_index_;
DCHECK_LT(static_cast<size_t>(index), capacity_);
back_index_ = index + 1;
@@ -122,22 +126,23 @@
}
void Sort() {
- if (!is_sorted_) {
- int32_t start_back_index = back_index_.load();
- int32_t start_front_index = front_index_.load();
- is_sorted_ = true;
- std::sort(Begin(), End());
- CHECK_EQ(start_back_index, back_index_.load());
- CHECK_EQ(start_front_index, front_index_.load());
+ int32_t start_back_index = back_index_.load();
+ int32_t start_front_index = front_index_.load();
+ std::sort(Begin(), End());
+ CHECK_EQ(start_back_index, back_index_.load());
+ CHECK_EQ(start_front_index, front_index_.load());
+ if (kIsDebugBuild) {
+ debug_is_sorted_ = true;
}
}
+ bool ContainsSorted(const T& value) const {
+ DCHECK(debug_is_sorted_);
+ return std::binary_search(Begin(), End(), value);
+ }
+
bool Contains(const T& value) const {
- if (is_sorted_) {
- return std::binary_search(Begin(), End(), value);
- } else {
- return std::find(Begin(), End(), value) != End();
- }
+ return std::find(Begin(), End(), value) != End();
}
private:
@@ -147,7 +152,7 @@
front_index_(0),
begin_(NULL),
capacity_(capacity),
- is_sorted_(true) {
+ debug_is_sorted_(true) {
}
// Size in number of elements.
@@ -156,6 +161,7 @@
CHECK(mem_map_.get() != NULL) << "couldn't allocate mark stack";
byte* addr = mem_map_->Begin();
CHECK(addr != NULL);
+ debug_is_sorted_ = true;
begin_ = reinterpret_cast<T*>(addr);
Reset();
}
@@ -178,7 +184,8 @@
// Maximum number of elements.
size_t capacity_;
- bool is_sorted_;
+ // Whether or not the stack is sorted, only updated in debug mode to avoid performance overhead.
+ bool debug_is_sorted_;
DISALLOW_COPY_AND_ASSIGN(AtomicStack);
};
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 1684664..0f566c9 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -64,7 +64,7 @@
void RegisterPause(uint64_t nano_length);
- base::NewTimingLogger& GetTimings() {
+ base::TimingLogger& GetTimings() {
return timings_;
}
@@ -101,7 +101,7 @@
const bool verbose_;
uint64_t duration_ns_;
- base::NewTimingLogger timings_;
+ base::TimingLogger timings_;
// Cumulative statistics.
uint64_t total_time_ns_;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 5736e38..89c768a 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -1509,7 +1509,7 @@
// Update the cumulative loggers.
cumulative_timings_.Start();
- cumulative_timings_.AddNewLogger(timings_);
+ cumulative_timings_.AddLogger(timings_);
cumulative_timings_.End();
// Clear all of the spaces' mark bitmaps.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 0c1c631..00f7e5b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -148,7 +148,7 @@
CHECK(large_object_space_ != NULL) << "Failed to create large object space";
AddDiscontinuousSpace(large_object_space_);
- alloc_space_ = space::DlMallocSpace::Create("alloc space",
+ alloc_space_ = space::DlMallocSpace::Create(Runtime::Current()->IsZygote() ? "zygote space" : "alloc space",
initial_size,
growth_limit, capacity,
requested_alloc_space_begin);
@@ -524,25 +524,24 @@
bool Heap::IsLiveObjectLocked(const mirror::Object* obj) {
// Locks::heap_bitmap_lock_->AssertReaderHeld(Thread::Current());
- if (obj == NULL) {
+ if (obj == NULL || UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
return false;
}
- if (UNLIKELY(!IsAligned<kObjectAlignment>(obj))) {
- return false;
- }
- space::ContinuousSpace* cont_space = FindContinuousSpaceFromObject(obj, true);
- if (cont_space != NULL) {
- if (cont_space->GetLiveBitmap()->Test(obj)) {
+ space::ContinuousSpace* c_space = FindContinuousSpaceFromObject(obj, true);
+ space::DiscontinuousSpace* d_space = NULL;
+ if (c_space != NULL) {
+ if (c_space->GetLiveBitmap()->Test(obj)) {
return true;
}
} else {
- space::DiscontinuousSpace* disc_space = FindDiscontinuousSpaceFromObject(obj, true);
- if (disc_space != NULL) {
- if (disc_space->GetLiveObjects()->Test(obj)) {
+ d_space = FindDiscontinuousSpaceFromObject(obj, true);
+ if (d_space != NULL) {
+ if (d_space->GetLiveObjects()->Test(obj)) {
return true;
}
}
}
+ // This is covering the allocation/live stack swapping that is done without mutators suspended.
for (size_t i = 0; i < 5; ++i) {
if (allocation_stack_->Contains(const_cast<mirror::Object*>(obj)) ||
live_stack_->Contains(const_cast<mirror::Object*>(obj))) {
@@ -550,6 +549,18 @@
}
NanoSleep(MsToNs(10));
}
+ // We need to check the bitmaps again since there is a race where we mark something as live and
+ // then clear the stack containing it.
+ if (c_space != NULL) {
+ if (c_space->GetLiveBitmap()->Test(obj)) {
+ return true;
+ }
+ } else {
+ d_space = FindDiscontinuousSpaceFromObject(obj, true);
+ if (d_space != NULL && d_space->GetLiveObjects()->Test(obj)) {
+ return true;
+ }
+ }
return false;
}
@@ -972,7 +983,7 @@
// Turns the current alloc space into a Zygote space and obtain the new alloc space composed
// of the remaining available heap memory.
space::DlMallocSpace* zygote_space = alloc_space_;
- alloc_space_ = zygote_space->CreateZygoteSpace();
+ alloc_space_ = zygote_space->CreateZygoteSpace("alloc space");
alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
// Change the GC retention policy of the zygote space to only collect when full.
@@ -1131,7 +1142,7 @@
<< PrettySize(total_memory) << ", " << "paused " << pause_string.str()
<< " total " << PrettyDuration((duration / 1000) * 1000);
if (VLOG_IS_ON(heap)) {
- LOG(INFO) << Dumpable<base::NewTimingLogger>(collector->GetTimings());
+ LOG(INFO) << Dumpable<base::TimingLogger>(collector->GetTimings());
}
}
@@ -1149,7 +1160,7 @@
return gc_type;
}
-void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings,
+void Heap::UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
collector::GcType gc_type) {
if (gc_type == collector::kGcTypeSticky) {
// Don't need to do anything for mod union table in this case since we are only scanning dirty
@@ -1229,10 +1240,10 @@
if (bitmap != NULL && bitmap->Test(obj)) {
LOG(ERROR) << "Object " << obj << " found in live bitmap";
}
- if (alloc_stack->Contains(const_cast<mirror::Object*>(obj))) {
+ if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
LOG(ERROR) << "Object " << obj << " found in allocation stack";
}
- if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
+ if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
LOG(ERROR) << "Object " << obj << " found in live stack";
}
// Attempt to see if the card table missed the reference.
@@ -1252,10 +1263,10 @@
} else {
LOG(ERROR) << "Root references dead object " << ref << "\nRef type " << PrettyTypeOf(ref);
}
- if (alloc_stack->Contains(const_cast<mirror::Object*>(ref))) {
+ if (alloc_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
LOG(ERROR) << "Reference " << ref << " found in allocation stack!";
}
- if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
+ if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
LOG(ERROR) << "Reference " << ref << " found in live stack!";
}
heap_->image_mod_union_table_->Dump(LOG(ERROR) << "Image mod-union table: ");
@@ -1345,8 +1356,8 @@
// Card should be either kCardDirty if it got re-dirtied after we aged it, or
// kCardDirty - 1 if it didnt get touched since we aged it.
accounting::ObjectStack* live_stack = heap_->live_stack_.get();
- if (live_stack->Contains(const_cast<mirror::Object*>(ref))) {
- if (live_stack->Contains(const_cast<mirror::Object*>(obj))) {
+ if (live_stack->ContainsSorted(const_cast<mirror::Object*>(ref))) {
+ if (live_stack->ContainsSorted(const_cast<mirror::Object*>(obj))) {
LOG(ERROR) << "Object " << obj << " found in live stack";
}
if (heap_->GetLiveBitmap()->Test(obj)) {
@@ -1441,7 +1452,7 @@
}
}
-void Heap::ProcessCards(base::NewTimingLogger& timings) {
+void Heap::ProcessCards(base::TimingLogger& timings) {
// Clear cards and keep track of cards cleared in the mod-union table.
typedef std::vector<space::ContinuousSpace*>::iterator It;
for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
@@ -1934,5 +1945,27 @@
} while (!native_bytes_allocated_.compare_and_swap(expected_size, new_size));
}
+int64_t Heap::GetTotalMemory() const {
+ int64_t ret = 0;
+ typedef std::vector<space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces_.begin(), end = continuous_spaces_.end(); it != end; ++it) {
+ space::ContinuousSpace* space = *it;
+ if (space->IsImageSpace()) {
+ // Currently don't include the image space.
+ } else if (space->IsDlMallocSpace()) {
+ // Zygote or alloc space
+ ret += space->AsDlMallocSpace()->GetFootprint();
+ }
+ }
+ typedef std::vector<space::DiscontinuousSpace*>::const_iterator It2;
+ for (It2 it = discontinuous_spaces_.begin(), end = discontinuous_spaces_.end(); it != end; ++it) {
+ space::DiscontinuousSpace* space = *it;
+ if (space->IsLargeObjectSpace()) {
+ ret += space->AsLargeObjectSpace()->GetBytesAllocated();
+ }
+ }
+ return ret;
+}
+
} // namespace gc
} // namespace art
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 20512b8..7615f98 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -330,11 +330,7 @@
// Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
// application.
- int64_t GetTotalMemory() const {
- // TODO: we use the footprint limit here which is conservative wrt number of pages really used.
- // We could implement a more accurate count across all spaces.
- return max_allowed_footprint_;
- }
+ int64_t GetTotalMemory() const;
// Implements java.lang.Runtime.freeMemory.
int64_t GetFreeMemory() const {
@@ -382,7 +378,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Update and mark mod union table based on gc type.
- void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::NewTimingLogger& timings,
+ void UpdateAndMarkModUnion(collector::MarkSweep* mark_sweep, base::TimingLogger& timings,
collector::GcType gc_type)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -475,7 +471,7 @@
void SwapStacks();
// Clear cards and update the mod union table.
- void ProcessCards(base::NewTimingLogger& timings);
+ void ProcessCards(base::TimingLogger& timings);
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index ee88eda..de4917f 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -286,7 +286,7 @@
}
}
-DlMallocSpace* DlMallocSpace::CreateZygoteSpace() {
+DlMallocSpace* DlMallocSpace::CreateZygoteSpace(const char* alloc_space_name) {
end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
DCHECK(IsAligned<accounting::CardTable::kCardSize>(begin_));
DCHECK(IsAligned<accounting::CardTable::kCardSize>(end_));
@@ -316,20 +316,19 @@
VLOG(heap) << "Size " << GetMemMap()->Size();
VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
VLOG(heap) << "Capacity " << PrettySize(capacity);
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName(), End(), capacity, PROT_READ | PROT_WRITE));
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(alloc_space_name, End(), capacity, PROT_READ | PROT_WRITE));
void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
// Protect memory beyond the initial size.
byte* end = mem_map->Begin() + starting_size;
if (capacity - initial_size > 0) {
- CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
+ CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), alloc_space_name);
}
DlMallocSpace* alloc_space =
- new DlMallocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
+ new DlMallocSpace(alloc_space_name, mem_map.release(), mspace, end_, end, growth_limit);
live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
- name_ += "-zygote-transformed";
VLOG(heap) << "zygote space creation done";
return alloc_space;
}
@@ -449,6 +448,11 @@
callback(NULL, NULL, 0, arg); // Indicate end of a space.
}
+size_t DlMallocSpace::GetFootprint() {
+ MutexLock mu(Thread::Current(), lock_);
+ return mspace_footprint(mspace_);
+}
+
size_t DlMallocSpace::GetFootprintLimit() {
MutexLock mu(Thread::Current(), lock_);
return mspace_footprint_limit(mspace_);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 8a4314c..c15d0ba 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -73,6 +73,10 @@
// in use, indicated by num_bytes equaling zero.
void Walk(WalkCallback callback, void* arg);
+ // Returns the number of bytes that the space has currently obtained from the system. This is
+ // greater or equal to the amount of live data in the space.
+ size_t GetFootprint();
+
// Returns the number of bytes that the heap is allowed to obtain from the system via MoreCore.
size_t GetFootprintLimit();
@@ -113,7 +117,7 @@
void SwapBitmaps();
// Turn ourself into a zygote space and return a new alloc space which has our unused memory.
- DlMallocSpace* CreateZygoteSpace();
+ DlMallocSpace* CreateZygoteSpace(const char* alloc_space_name);
uint64_t GetBytesAllocated() const {
return num_bytes_allocated_;
diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc
index 08ae894..3003140 100644
--- a/runtime/gc/space/space_test.cc
+++ b/runtime/gc/space/space_test.cc
@@ -123,7 +123,7 @@
// Make sure that the zygote space isn't directly at the start of the space.
space->Alloc(self, 1U * MB);
- space = space->CreateZygoteSpace();
+ space = space->CreateZygoteSpace("alloc space");
// Make space findable to the heap, will also delete space when runtime is cleaned up
AddContinuousSpace(space);
diff --git a/runtime/image_test.cc b/runtime/image_test.cc
index 75eead4..22bed2e 100644
--- a/runtime/image_test.cc
+++ b/runtime/image_test.cc
@@ -44,7 +44,8 @@
{
jobject class_loader = NULL;
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
- TimingLogger timings("ImageTest::WriteRead", false);
+ base::TimingLogger timings("ImageTest::WriteRead", false, false);
+ timings.StartSplit("CompileAll");
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
ScopedObjectAccess soa(Thread::Current());
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index e99fbd8..c0b85f4 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -30,7 +30,7 @@
#include "mirror/object-inl.h"
#include "nth_caller_visitor.h"
#if !defined(ART_USE_PORTABLE_COMPILER)
-#include "oat/runtime/oat_support_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#endif
#include "object_utils.h"
#include "os.h"
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 30c7a46..ef4b95c 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -24,6 +24,7 @@
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
#include "dex_instruction.h"
+#include "entrypoints/entrypoint_utils.h"
#include "gc/accounting/card_table-inl.h"
#include "invoke_arg_array_builder.h"
#include "nth_caller_visitor.h"
@@ -35,7 +36,6 @@
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "object_utils.h"
-#include "runtime_support.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
@@ -408,11 +408,11 @@
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
template<InvokeType type, bool is_range, bool do_access_check>
-static void DoInvoke(Thread* self, ShadowFrame& shadow_frame,
+static bool DoInvoke(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, JValue* result) NO_THREAD_SAFETY_ANALYSIS;
template<InvokeType type, bool is_range, bool do_access_check>
-static void DoInvoke(Thread* self, ShadowFrame& shadow_frame,
+static bool DoInvoke(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, JValue* result) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
@@ -422,7 +422,11 @@
if (UNLIKELY(method == NULL)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
- return;
+ return false;
+ } else if (UNLIKELY(method->IsAbstract())) {
+ ThrowAbstractMethodError(method);
+ result->SetJ(0);
+ return false;
}
MethodHelper mh(method);
@@ -432,9 +436,6 @@
if (LIKELY(code_item != NULL)) {
num_regs = code_item->registers_size_;
num_ins = code_item->ins_size_;
- } else if (method->IsAbstract()) {
- ThrowAbstractMethodError(method);
- return;
} else {
DCHECK(method->IsNative() || method->IsProxyMethod());
num_regs = num_ins = AbstractMethod::NumArgRegisters(mh.GetShorty());
@@ -486,17 +487,18 @@
} else {
UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins);
}
+ return !self->IsExceptionPending();
}
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
template<bool is_range>
-static void DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
+static bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, JValue* result)
NO_THREAD_SAFETY_ANALYSIS;
template<bool is_range>
-static void DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
+static bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst, JValue* result) {
uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
Object* receiver = shadow_frame.GetVRegReference(vregC);
@@ -504,26 +506,28 @@
// We lost the reference to the method index so we cannot get a more
// precised exception message.
ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- return;
+ return false;
}
uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
+ // TODO: use ObjectArray<T>::GetWithoutChecks ?
AbstractMethod* method = receiver->GetClass()->GetVTable()->Get(vtable_idx);
if (UNLIKELY(method == NULL)) {
CHECK(self->IsExceptionPending());
result->SetJ(0);
- return;
+ return false;
+ } else if (UNLIKELY(method->IsAbstract())) {
+ ThrowAbstractMethodError(method);
+ result->SetJ(0);
+ return false;
}
- MethodHelper mh(method);
+ MethodHelper mh(method);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
uint16_t num_regs;
uint16_t num_ins;
if (code_item != NULL) {
num_regs = code_item->registers_size_;
num_ins = code_item->ins_size_;
- } else if (method->IsAbstract()) {
- ThrowAbstractMethodError(method);
- return;
} else {
DCHECK(method->IsNative() || method->IsProxyMethod());
num_regs = num_ins = AbstractMethod::NumArgRegisters(mh.GetShorty());
@@ -576,6 +580,7 @@
} else {
UnstartedRuntimeInvoke(self, mh, code_item, new_shadow_frame, result, num_regs - num_ins);
}
+ return !self->IsExceptionPending();
}
// We use template functions to optimize compiler inlining process. Otherwise,
@@ -587,12 +592,12 @@
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static void DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
+static bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst)
NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static inline void DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
+static inline bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst) {
bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
@@ -601,7 +606,7 @@
do_access_check);
if (UNLIKELY(f == NULL)) {
CHECK(self->IsExceptionPending());
- return;
+ return false;
}
Object* obj;
if (is_static) {
@@ -610,7 +615,7 @@
obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
if (UNLIKELY(obj == NULL)) {
ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true);
- return;
+ return false;
}
}
uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c();
@@ -639,24 +644,25 @@
default:
LOG(FATAL) << "Unreachable: " << field_type;
}
+ return true;
}
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
template<Primitive::Type field_type>
-static void DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
+static bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst)
NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
template<Primitive::Type field_type>
-static inline void DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
+static inline bool DoIGetQuick(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
if (UNLIKELY(obj == NULL)) {
// We lost the reference to the field index so we cannot get a more
// precised exception message.
ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- return;
+ return false;
}
MemberOffset field_offset(inst->VRegC_22c());
const bool is_volatile = false; // iget-x-quick only on non volatile fields.
@@ -674,17 +680,18 @@
default:
LOG(FATAL) << "Unreachable: " << field_type;
}
+ return true;
}
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static void DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
+static bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
const Instruction* inst)
NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
-static inline void DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
+static inline bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame,
const Instruction* inst) {
bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
@@ -693,7 +700,7 @@
do_access_check);
if (UNLIKELY(f == NULL)) {
CHECK(self->IsExceptionPending());
- return;
+ return false;
}
Object* obj;
if (is_static) {
@@ -703,7 +710,7 @@
if (UNLIKELY(obj == NULL)) {
ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(),
f, false);
- return;
+ return false;
}
}
uint32_t vregA = is_static ? inst->VRegA_21c() : inst->VRegA_22c();
@@ -732,24 +739,25 @@
default:
LOG(FATAL) << "Unreachable: " << field_type;
}
+ return true;
}
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
// specialization.
template<Primitive::Type field_type>
-static void DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
+static bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst)
NO_THREAD_SAFETY_ANALYSIS ALWAYS_INLINE;
template<Primitive::Type field_type>
-static inline void DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
+static inline bool DoIPutQuick(Thread* self, ShadowFrame& shadow_frame,
const Instruction* inst) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegB_22c());
if (UNLIKELY(obj == NULL)) {
// We lost the reference to the field index so we cannot get a more
// precised exception message.
ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
- return;
+ return false;
}
MemberOffset field_offset(inst->VRegC_22c());
const bool is_volatile = false; // iput-x-quick only on non volatile fields.
@@ -767,6 +775,7 @@
default:
LOG(FATAL) << "Unreachable: " << field_type;
}
+ return true;
}
static inline String* ResolveString(Thread* self, MethodHelper& mh, uint32_t string_idx)
@@ -783,52 +792,64 @@
return mh.ResolveString(string_idx);
}
-static inline void DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
+static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
- } else if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
+ return false;
+ }
+ if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
shadow_frame.SetVReg(result_reg, kMinInt);
} else {
shadow_frame.SetVReg(result_reg, dividend / divisor);
}
+ return true;
}
-static inline void DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
+static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
- } else if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
+ return false;
+ }
+ if (UNLIKELY(dividend == kMinInt && divisor == -1)) {
shadow_frame.SetVReg(result_reg, 0);
} else {
shadow_frame.SetVReg(result_reg, dividend % divisor);
}
+ return true;
}
-static inline void DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
+static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
- } else if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
+ return false;
+ }
+ if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
shadow_frame.SetVRegLong(result_reg, kMinLong);
} else {
shadow_frame.SetVRegLong(result_reg, dividend / divisor);
}
+ return true;
}
-static inline void DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
+static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
- } else if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
+ return false;
+ }
+ if (UNLIKELY(dividend == kMinLong && divisor == -1)) {
shadow_frame.SetVRegLong(result_reg, 0);
} else {
shadow_frame.SetVRegLong(result_reg, dividend % divisor);
}
+ return true;
}
// TODO: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) which is failing due to template
@@ -954,7 +975,9 @@
self->VerifyStack();
ThrowLocation throw_location;
mirror::Throwable* exception = self->GetException(&throw_location);
- uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc);
+ bool clear_exception;
+ uint32_t found_dex_pc = shadow_frame.GetMethod()->FindCatchBlock(exception->GetClass(), dex_pc,
+ &clear_exception);
if (found_dex_pc == DexFile::kDexNoIndex) {
instrumentation->MethodUnwindEvent(self, this_object_ref.get(),
shadow_frame.GetMethod(), dex_pc);
@@ -963,6 +986,9 @@
instrumentation->ExceptionCaughtEvent(self, throw_location,
shadow_frame.GetMethod(),
found_dex_pc, exception);
+ if (clear_exception) {
+ self->ClearException();
+ }
return Instruction::At(insns + found_dex_pc);
}
}
@@ -975,13 +1001,9 @@
return JValue(); /* Handled in caller. */ \
}
-#define POSSIBLY_HANDLE_PENDING_EXCEPTION(next_function) \
- if (UNLIKELY(self->IsExceptionPending())) { \
- inst = FindNextInstructionFollowingException(self, shadow_frame, inst->GetDexPc(insns), insns, \
- this_object_ref, instrumentation); \
- if (inst == NULL) { \
- return JValue(); /* Handled in caller. */ \
- } \
+#define POSSIBLY_HANDLE_PENDING_EXCEPTION(is_exception_pending, next_function) \
+ if (UNLIKELY(is_exception_pending)) { \
+ HANDLE_PENDING_EXCEPTION(); \
} else { \
inst = inst->next_function(); \
}
@@ -1013,28 +1035,29 @@
return JValue();
}
self->VerifyStack();
- instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
- const uint16_t* const insns = code_item->insns_;
+ instrumentation::Instrumentation* const instrumentation = Runtime::Current()->GetInstrumentation();
// As the 'this' object won't change during the execution of current code, we
// want to cache it in local variables. Nevertheless, in order to let the
// garbage collector access it, we store it into sirt references.
SirtRef<Object> this_object_ref(self, shadow_frame.GetThisObject(code_item->ins_size_));
- const Instruction* inst = Instruction::At(insns + shadow_frame.GetDexPC());
- if (inst->GetDexPc(insns) == 0) { // We are entering the method as opposed to deoptimizing..
+ uint32_t dex_pc = shadow_frame.GetDexPC();
+ if (LIKELY(dex_pc == 0)) { // We are entering the method as opposed to deoptimizing..
if (UNLIKELY(instrumentation->HasMethodEntryListeners())) {
instrumentation->MethodEnterEvent(self, this_object_ref.get(),
shadow_frame.GetMethod(), 0);
}
}
+ const uint16_t* const insns = code_item->insns_;
+ const Instruction* inst = Instruction::At(insns + dex_pc);
while (true) {
+ dex_pc = inst->GetDexPc(insns);
+ shadow_frame.SetDexPC(dex_pc);
if (UNLIKELY(self->TestAllFlags())) {
CheckSuspend(self);
}
- const uint32_t dex_pc = inst->GetDexPc(insns);
- shadow_frame.SetDexPC(dex_pc);
- if (instrumentation->HasDexPcListeners()) {
+ if (UNLIKELY(instrumentation->HasDexPcListeners())) {
instrumentation->DexPcMovedEvent(self, this_object_ref.get(),
shadow_frame.GetMethod(), dex_pc);
}
@@ -1200,8 +1223,8 @@
}
case Instruction::CONST_4: {
PREAMBLE();
- uint32_t dst = inst->VRegA_11n();
- int32_t val = inst->VRegB_11n();
+ uint4_t dst = inst->VRegA_11n();
+ int4_t val = inst->VRegB_11n();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
shadow_frame.SetVRegReference(dst, NULL);
@@ -1211,8 +1234,8 @@
}
case Instruction::CONST_16: {
PREAMBLE();
- uint32_t dst = inst->VRegA_21s();
- int32_t val = inst->VRegB_21s();
+ uint8_t dst = inst->VRegA_21s();
+ int16_t val = inst->VRegB_21s();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
shadow_frame.SetVRegReference(dst, NULL);
@@ -1222,7 +1245,7 @@
}
case Instruction::CONST: {
PREAMBLE();
- uint32_t dst = inst->VRegA_31i();
+ uint8_t dst = inst->VRegA_31i();
int32_t val = inst->VRegB_31i();
shadow_frame.SetVReg(dst, val);
if (val == 0) {
@@ -1233,7 +1256,7 @@
}
case Instruction::CONST_HIGH16: {
PREAMBLE();
- uint32_t dst = inst->VRegA_21h();
+ uint8_t dst = inst->VRegA_21h();
int32_t val = static_cast<int32_t>(inst->VRegB_21h() << 16);
shadow_frame.SetVReg(dst, val);
if (val == 0) {
@@ -1304,7 +1327,7 @@
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorEnter(self, obj);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
break;
}
@@ -1316,7 +1339,7 @@
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorExit(self, obj);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
}
break;
}
@@ -1391,22 +1414,14 @@
PREAMBLE();
bool success = DoFilledNewArray<false, do_access_check>(inst, shadow_frame,
self, &result_register);
- if (LIKELY(success)) {
- inst = inst->Next_3xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::FILLED_NEW_ARRAY_RANGE: {
PREAMBLE();
bool success = DoFilledNewArray<true, do_access_check>(inst, shadow_frame,
self, &result_register);
- if (LIKELY(success)) {
- inst = inst->Next_3xx();
- } else {
- HANDLE_PENDING_EXCEPTION();
- }
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
}
case Instruction::FILL_ARRAY_DATA: {
@@ -1934,236 +1949,282 @@
}
break;
}
- case Instruction::IGET_BOOLEAN:
+ case Instruction::IGET_BOOLEAN: {
PREAMBLE();
- DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IGET_BYTE:
+ }
+ case Instruction::IGET_BYTE: {
PREAMBLE();
- DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IGET_CHAR:
+ }
+ case Instruction::IGET_CHAR: {
PREAMBLE();
- DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IGET_SHORT:
+ }
+ case Instruction::IGET_SHORT: {
PREAMBLE();
- DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IGET:
+ }
+ case Instruction::IGET: {
PREAMBLE();
- DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IGET_WIDE:
+ }
+ case Instruction::IGET_WIDE: {
PREAMBLE();
- DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<InstancePrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IGET_OBJECT:
+ }
+ case Instruction::IGET_OBJECT: {
PREAMBLE();
- DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<InstanceObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IGET_QUICK:
+ }
+ case Instruction::IGET_QUICK: {
PREAMBLE();
- DoIGetQuick<Primitive::kPrimInt>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIGetQuick<Primitive::kPrimInt>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IGET_WIDE_QUICK:
+ }
+ case Instruction::IGET_WIDE_QUICK: {
PREAMBLE();
- DoIGetQuick<Primitive::kPrimLong>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIGetQuick<Primitive::kPrimLong>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IGET_OBJECT_QUICK:
+ }
+ case Instruction::IGET_OBJECT_QUICK: {
PREAMBLE();
- DoIGetQuick<Primitive::kPrimNot>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIGetQuick<Primitive::kPrimNot>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SGET_BOOLEAN:
+ }
+ case Instruction::SGET_BOOLEAN: {
PREAMBLE();
- DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SGET_BYTE:
+ }
+ case Instruction::SGET_BYTE: {
PREAMBLE();
- DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SGET_CHAR:
+ }
+ case Instruction::SGET_CHAR: {
PREAMBLE();
- DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SGET_SHORT:
+ }
+ case Instruction::SGET_SHORT: {
PREAMBLE();
- DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SGET:
+ }
+ case Instruction::SGET: {
PREAMBLE();
- DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SGET_WIDE:
+ }
+ case Instruction::SGET_WIDE: {
PREAMBLE();
- DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<StaticPrimitiveRead, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SGET_OBJECT:
+ }
+ case Instruction::SGET_OBJECT: {
PREAMBLE();
- DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldGet<StaticObjectRead, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT_BOOLEAN:
+ }
+ case Instruction::IPUT_BOOLEAN: {
PREAMBLE();
- DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT_BYTE:
+ }
+ case Instruction::IPUT_BYTE: {
PREAMBLE();
- DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT_CHAR:
+ }
+ case Instruction::IPUT_CHAR: {
PREAMBLE();
- DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT_SHORT:
+ }
+ case Instruction::IPUT_SHORT: {
PREAMBLE();
- DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT:
+ }
+ case Instruction::IPUT: {
PREAMBLE();
- DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT_WIDE:
+ }
+ case Instruction::IPUT_WIDE: {
PREAMBLE();
- DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<InstancePrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT_OBJECT:
+ }
+ case Instruction::IPUT_OBJECT: {
PREAMBLE();
- DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT_QUICK:
+ }
+ case Instruction::IPUT_QUICK: {
PREAMBLE();
- DoIPutQuick<Primitive::kPrimInt>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIPutQuick<Primitive::kPrimInt>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT_WIDE_QUICK:
+ }
+ case Instruction::IPUT_WIDE_QUICK: {
PREAMBLE();
- DoIPutQuick<Primitive::kPrimLong>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIPutQuick<Primitive::kPrimLong>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::IPUT_OBJECT_QUICK:
+ }
+ case Instruction::IPUT_OBJECT_QUICK: {
PREAMBLE();
- DoIPutQuick<Primitive::kPrimNot>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIPutQuick<Primitive::kPrimNot>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SPUT_BOOLEAN:
+ }
+ case Instruction::SPUT_BOOLEAN: {
PREAMBLE();
- DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimBoolean, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SPUT_BYTE:
+ }
+ case Instruction::SPUT_BYTE: {
PREAMBLE();
- DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimByte, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SPUT_CHAR:
+ }
+ case Instruction::SPUT_CHAR: {
PREAMBLE();
- DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimChar, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SPUT_SHORT:
+ }
+ case Instruction::SPUT_SHORT: {
PREAMBLE();
- DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimShort, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SPUT:
+ }
+ case Instruction::SPUT: {
PREAMBLE();
- DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimInt, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SPUT_WIDE:
+ }
+ case Instruction::SPUT_WIDE: {
PREAMBLE();
- DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<StaticPrimitiveWrite, Primitive::kPrimLong, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::SPUT_OBJECT:
+ }
+ case Instruction::SPUT_OBJECT: {
PREAMBLE();
- DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, do_access_check>(self, shadow_frame, inst);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::INVOKE_VIRTUAL:
+ }
+ case Instruction::INVOKE_VIRTUAL: {
PREAMBLE();
- DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kVirtual, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_VIRTUAL_RANGE:
+ }
+ case Instruction::INVOKE_VIRTUAL_RANGE: {
PREAMBLE();
- DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kVirtual, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_SUPER:
+ }
+ case Instruction::INVOKE_SUPER: {
PREAMBLE();
- DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kSuper, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_SUPER_RANGE:
+ }
+ case Instruction::INVOKE_SUPER_RANGE: {
PREAMBLE();
- DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kSuper, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_DIRECT:
+ }
+ case Instruction::INVOKE_DIRECT: {
PREAMBLE();
- DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kDirect, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_DIRECT_RANGE:
+ }
+ case Instruction::INVOKE_DIRECT_RANGE: {
PREAMBLE();
- DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kDirect, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_INTERFACE:
+ }
+ case Instruction::INVOKE_INTERFACE: {
PREAMBLE();
- DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kInterface, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_INTERFACE_RANGE:
+ }
+ case Instruction::INVOKE_INTERFACE_RANGE: {
PREAMBLE();
- DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kInterface, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_STATIC:
+ }
+ case Instruction::INVOKE_STATIC: {
PREAMBLE();
- DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kStatic, false, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_STATIC_RANGE:
+ }
+ case Instruction::INVOKE_STATIC_RANGE: {
PREAMBLE();
- DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvoke<kStatic, true, do_access_check>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_VIRTUAL_QUICK:
+ }
+ case Instruction::INVOKE_VIRTUAL_QUICK: {
PREAMBLE();
- DoInvokeVirtualQuick<false>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvokeVirtualQuick<false>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
- case Instruction::INVOKE_VIRTUAL_RANGE_QUICK:
+ }
+ case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
PREAMBLE();
- DoInvokeVirtualQuick<true>(self, shadow_frame, inst, &result_register);
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_3xx);
+ bool success = DoInvokeVirtualQuick<true>(self, shadow_frame, inst, &result_register);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_3xx);
break;
+ }
case Instruction::NEG_INT:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_12x(), -shadow_frame.GetVReg(inst->VRegB_12x()));
@@ -2341,20 +2402,22 @@
shadow_frame.GetVReg(inst->VRegC_23x()));
inst = inst->Next_2xx();
break;
- case Instruction::DIV_INT:
+ case Instruction::DIV_INT: {
PREAMBLE();
- DoIntDivide(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_23x(),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::REM_INT:
+ }
+ case Instruction::REM_INT: {
PREAMBLE();
- DoIntRemainder(shadow_frame, inst->VRegA_23x(),
- shadow_frame.GetVReg(inst->VRegB_23x()),
- shadow_frame.GetVReg(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_23x(),
+ shadow_frame.GetVReg(inst->VRegB_23x()),
+ shadow_frame.GetVReg(inst->VRegC_23x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
+ }
case Instruction::SHL_INT:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_23x(),
@@ -2423,14 +2486,14 @@
DoLongDivide(shadow_frame, inst->VRegA_23x(),
shadow_frame.GetVRegLong(inst->VRegB_23x()),
shadow_frame.GetVRegLong(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
break;
case Instruction::REM_LONG:
PREAMBLE();
DoLongRemainder(shadow_frame, inst->VRegA_23x(),
shadow_frame.GetVRegLong(inst->VRegB_23x()),
shadow_frame.GetVRegLong(inst->VRegC_23x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_2xx);
break;
case Instruction::AND_LONG:
PREAMBLE();
@@ -2546,7 +2609,7 @@
break;
case Instruction::ADD_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) +
shadow_frame.GetVReg(inst->VRegB_12x()));
@@ -2555,7 +2618,7 @@
}
case Instruction::SUB_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) -
shadow_frame.GetVReg(inst->VRegB_12x()));
@@ -2564,7 +2627,7 @@
}
case Instruction::MUL_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) *
shadow_frame.GetVReg(inst->VRegB_12x()));
@@ -2573,23 +2636,23 @@
}
case Instruction::DIV_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
- DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x()));
- inst = inst->Next_1xx();
+ uint4_t vregA = inst->VRegA_12x();
+ bool success = DoIntDivide(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
break;
}
case Instruction::REM_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
- DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
- shadow_frame.GetVReg(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx);
+ uint4_t vregA = inst->VRegA_12x();
+ bool success = DoIntRemainder(shadow_frame, vregA, shadow_frame.GetVReg(vregA),
+ shadow_frame.GetVReg(inst->VRegB_12x()));
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_1xx);
break;
}
case Instruction::SHL_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) <<
(shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f));
@@ -2598,7 +2661,7 @@
}
case Instruction::SHR_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) >>
(shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f));
@@ -2607,7 +2670,7 @@
}
case Instruction::USHR_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVReg(vregA,
static_cast<uint32_t>(shadow_frame.GetVReg(vregA)) >>
(shadow_frame.GetVReg(inst->VRegB_12x()) & 0x1f));
@@ -2616,7 +2679,7 @@
}
case Instruction::AND_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) &
shadow_frame.GetVReg(inst->VRegB_12x()));
@@ -2625,7 +2688,7 @@
}
case Instruction::OR_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) |
shadow_frame.GetVReg(inst->VRegB_12x()));
@@ -2634,7 +2697,7 @@
}
case Instruction::XOR_INT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVReg(vregA,
shadow_frame.GetVReg(vregA) ^
shadow_frame.GetVReg(inst->VRegB_12x()));
@@ -2643,7 +2706,7 @@
}
case Instruction::ADD_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) +
shadow_frame.GetVRegLong(inst->VRegB_12x()));
@@ -2652,7 +2715,7 @@
}
case Instruction::SUB_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) -
shadow_frame.GetVRegLong(inst->VRegB_12x()));
@@ -2661,7 +2724,7 @@
}
case Instruction::MUL_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) *
shadow_frame.GetVRegLong(inst->VRegB_12x()));
@@ -2670,23 +2733,23 @@
}
case Instruction::DIV_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
DoLongDivide(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
shadow_frame.GetVRegLong(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
break;
}
case Instruction::REM_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
DoLongRemainder(shadow_frame, vregA, shadow_frame.GetVRegLong(vregA),
shadow_frame.GetVRegLong(inst->VRegB_12x()));
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_1xx);
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(self->IsExceptionPending(), Next_1xx);
break;
}
case Instruction::AND_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) &
shadow_frame.GetVRegLong(inst->VRegB_12x()));
@@ -2695,7 +2758,7 @@
}
case Instruction::OR_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) |
shadow_frame.GetVRegLong(inst->VRegB_12x()));
@@ -2704,7 +2767,7 @@
}
case Instruction::XOR_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) ^
shadow_frame.GetVRegLong(inst->VRegB_12x()));
@@ -2713,7 +2776,7 @@
}
case Instruction::SHL_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) <<
(shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f));
@@ -2722,7 +2785,7 @@
}
case Instruction::SHR_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegLong(vregA,
shadow_frame.GetVRegLong(vregA) >>
(shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f));
@@ -2731,7 +2794,7 @@
}
case Instruction::USHR_LONG_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegLong(vregA,
static_cast<uint64_t>(shadow_frame.GetVRegLong(vregA)) >>
(shadow_frame.GetVReg(inst->VRegB_12x()) & 0x3f));
@@ -2740,7 +2803,7 @@
}
case Instruction::ADD_FLOAT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegFloat(vregA,
shadow_frame.GetVRegFloat(vregA) +
shadow_frame.GetVRegFloat(inst->VRegB_12x()));
@@ -2749,7 +2812,7 @@
}
case Instruction::SUB_FLOAT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegFloat(vregA,
shadow_frame.GetVRegFloat(vregA) -
shadow_frame.GetVRegFloat(inst->VRegB_12x()));
@@ -2758,7 +2821,7 @@
}
case Instruction::MUL_FLOAT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegFloat(vregA,
shadow_frame.GetVRegFloat(vregA) *
shadow_frame.GetVRegFloat(inst->VRegB_12x()));
@@ -2767,7 +2830,7 @@
}
case Instruction::DIV_FLOAT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegFloat(vregA,
shadow_frame.GetVRegFloat(vregA) /
shadow_frame.GetVRegFloat(inst->VRegB_12x()));
@@ -2776,7 +2839,7 @@
}
case Instruction::REM_FLOAT_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegFloat(vregA,
fmodf(shadow_frame.GetVRegFloat(vregA),
shadow_frame.GetVRegFloat(inst->VRegB_12x())));
@@ -2785,7 +2848,7 @@
}
case Instruction::ADD_DOUBLE_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegDouble(vregA,
shadow_frame.GetVRegDouble(vregA) +
shadow_frame.GetVRegDouble(inst->VRegB_12x()));
@@ -2794,7 +2857,7 @@
}
case Instruction::SUB_DOUBLE_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegDouble(vregA,
shadow_frame.GetVRegDouble(vregA) -
shadow_frame.GetVRegDouble(inst->VRegB_12x()));
@@ -2803,7 +2866,7 @@
}
case Instruction::MUL_DOUBLE_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegDouble(vregA,
shadow_frame.GetVRegDouble(vregA) *
shadow_frame.GetVRegDouble(inst->VRegB_12x()));
@@ -2812,7 +2875,7 @@
}
case Instruction::DIV_DOUBLE_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegDouble(vregA,
shadow_frame.GetVRegDouble(vregA) /
shadow_frame.GetVRegDouble(inst->VRegB_12x()));
@@ -2821,7 +2884,7 @@
}
case Instruction::REM_DOUBLE_2ADDR: {
PREAMBLE();
- uint32_t vregA = inst->VRegA_12x();
+ uint4_t vregA = inst->VRegA_12x();
shadow_frame.SetVRegDouble(vregA,
fmod(shadow_frame.GetVRegDouble(vregA),
shadow_frame.GetVRegDouble(inst->VRegB_12x())));
@@ -2849,18 +2912,20 @@
inst->VRegC_22s());
inst = inst->Next_2xx();
break;
- case Instruction::DIV_INT_LIT16:
+ case Instruction::DIV_INT_LIT16: {
PREAMBLE();
- DoIntDivide(shadow_frame, inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22s(),
+ shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::REM_INT_LIT16:
+ }
+ case Instruction::REM_INT_LIT16: {
PREAMBLE();
- DoIntRemainder(shadow_frame, inst->VRegA_22s(),
- shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22s(),
+ shadow_frame.GetVReg(inst->VRegB_22s()), inst->VRegC_22s());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
+ }
case Instruction::AND_INT_LIT16:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22s(),
@@ -2903,18 +2968,20 @@
inst->VRegC_22b());
inst = inst->Next_2xx();
break;
- case Instruction::DIV_INT_LIT8:
+ case Instruction::DIV_INT_LIT8: {
PREAMBLE();
- DoIntDivide(shadow_frame, inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIntDivide(shadow_frame, inst->VRegA_22b(),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
- case Instruction::REM_INT_LIT8:
+ }
+ case Instruction::REM_INT_LIT8: {
PREAMBLE();
- DoIntRemainder(shadow_frame, inst->VRegA_22b(),
- shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
- POSSIBLY_HANDLE_PENDING_EXCEPTION(Next_2xx);
+ bool success = DoIntRemainder(shadow_frame, inst->VRegA_22b(),
+ shadow_frame.GetVReg(inst->VRegB_22b()), inst->VRegC_22b());
+ POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
break;
+ }
case Instruction::AND_INT_LIT8:
PREAMBLE();
shadow_frame.SetVReg(inst->VRegA_22b(),
diff --git a/runtime/mirror/abstract_method-inl.h b/runtime/mirror/abstract_method-inl.h
index 2df1367..d235e3e 100644
--- a/runtime/mirror/abstract_method-inl.h
+++ b/runtime/mirror/abstract_method-inl.h
@@ -20,9 +20,9 @@
#include "abstract_method.h"
#include "dex_file.h"
+#include "entrypoints/entrypoint_utils.h"
#include "object_array.h"
#include "runtime.h"
-#include "runtime_support.h"
namespace art {
namespace mirror {
diff --git a/runtime/mirror/abstract_method.cc b/runtime/mirror/abstract_method.cc
index 58ef5f7..d08708f 100644
--- a/runtime/mirror/abstract_method.cc
+++ b/runtime/mirror/abstract_method.cc
@@ -20,6 +20,7 @@
#include "base/stringpiece.h"
#include "class-inl.h"
#include "dex_file-inl.h"
+#include "dex_instruction.h"
#include "gc/accounting/card_table-inl.h"
#include "interpreter/interpreter.h"
#include "jni_internal.h"
@@ -225,7 +226,8 @@
return 0;
}
-uint32_t AbstractMethod::FindCatchBlock(Class* exception_type, uint32_t dex_pc) const {
+uint32_t AbstractMethod::FindCatchBlock(Class* exception_type, uint32_t dex_pc,
+ bool* has_no_move_exception) const {
MethodHelper mh(this);
const DexFile::CodeItem* code_item = mh.GetCodeItem();
// Iterate over the catch handlers associated with dex_pc
@@ -242,7 +244,11 @@
LOG(WARNING) << "Unresolved exception class when finding catch block: "
<< mh.GetTypeDescriptorFromTypeIdx(iter_type_idx);
} else if (iter_exception_type->IsAssignableFrom(exception_type)) {
- return it.GetHandlerAddress();
+ uint32_t found_dex_pc = it.GetHandlerAddress();
+ const Instruction* first_catch_instr =
+ Instruction::At(&mh.GetCodeItem()->insns_[found_dex_pc]);
+ *has_no_move_exception = (first_catch_instr->Opcode() != Instruction::MOVE_EXCEPTION);
+ return found_dex_pc;
}
}
// Handler not found
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index bbebece..2e6e262 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -407,8 +407,10 @@
uintptr_t ToFirstNativeSafepointPc(const uint32_t dex_pc)
const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Find the catch block for the given exception type and dex_pc
- uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc) const
+ // Find the catch block for the given exception type and dex_pc. When a catch block is found,
+ // indicates whether the found catch block is responsible for clearing the exception or whether
+ // a move-exception instruction is present.
+ uint32_t FindCatchBlock(Class* exception_type, uint32_t dex_pc, bool* has_no_move_exception) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void SetClasses(Class* java_lang_reflect_Constructor, Class* java_lang_reflect_Method);
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 53a1df9..540ff9f 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -26,6 +26,7 @@
#include "class_linker-inl.h"
#include "common_test.h"
#include "dex_file.h"
+#include "entrypoints/entrypoint_utils.h"
#include "field-inl.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
@@ -33,7 +34,6 @@
#include "abstract_method-inl.h"
#include "object-inl.h"
#include "object_array-inl.h"
-#include "runtime_support.h"
#include "sirt_ref.h"
#include "UniquePtr.h"
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 60624c2..e3ec3bc 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -20,6 +20,9 @@
#include "class_linker.h"
#include "common_throws.h"
#include "debugger.h"
+#include "gc/space/dlmalloc_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
#include "hprof/hprof.h"
#include "jni_internal.h"
#include "mirror/class.h"
@@ -234,6 +237,69 @@
return count;
}
+// We export the VM internal per-heap-space size/alloc/free metrics
+// for the zygote space, alloc space (application heap), and the large
+// object space for dumpsys meminfo. The other memory region data such
+// as PSS, private/shared dirty/shared data are available via
+// /proc/<pid>/smaps.
+static void VMDebug_getHeapSpaceStats(JNIEnv* env, jclass, jlongArray data) {
+ jlong* arr = reinterpret_cast<jlong*>(env->GetPrimitiveArrayCritical(data, 0));
+ if (arr == NULL || env->GetArrayLength(data) < 9) {
+ return;
+ }
+
+ size_t allocSize = 0;
+ size_t allocUsed = 0;
+ size_t zygoteSize = 0;
+ size_t zygoteUsed = 0;
+ size_t largeObjectsSize = 0;
+ size_t largeObjectsUsed = 0;
+
+ gc::Heap* heap = Runtime::Current()->GetHeap();
+ const std::vector<gc::space::ContinuousSpace*>& continuous_spaces = heap->GetContinuousSpaces();
+ const std::vector<gc::space::DiscontinuousSpace*>& discontinuous_spaces = heap->GetDiscontinuousSpaces();
+ typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
+ for (It it = continuous_spaces.begin(), end = continuous_spaces.end(); it != end; ++it) {
+ gc::space::ContinuousSpace* space = *it;
+ if (space->IsImageSpace()) {
+ // Currently don't include the image space.
+ } else if (space->IsZygoteSpace()) {
+ gc::space::DlMallocSpace* dlmalloc_space = space->AsDlMallocSpace();
+ zygoteSize += dlmalloc_space->GetFootprint();
+ zygoteUsed += dlmalloc_space->GetBytesAllocated();
+ } else {
+ // This is the alloc space.
+ gc::space::DlMallocSpace* dlmalloc_space = space->AsDlMallocSpace();
+ allocSize += dlmalloc_space->GetFootprint();
+ allocUsed += dlmalloc_space->GetBytesAllocated();
+ }
+ }
+ typedef std::vector<gc::space::DiscontinuousSpace*>::const_iterator It2;
+ for (It2 it = discontinuous_spaces.begin(), end = discontinuous_spaces.end(); it != end; ++it) {
+ gc::space::DiscontinuousSpace* space = *it;
+ if (space->IsLargeObjectSpace()) {
+ largeObjectsSize += space->AsLargeObjectSpace()->GetBytesAllocated();
+ largeObjectsUsed += largeObjectsSize;
+ }
+ }
+
+ size_t allocFree = allocSize - allocUsed;
+ size_t zygoteFree = zygoteSize - zygoteUsed;
+ size_t largeObjectsFree = largeObjectsSize - largeObjectsUsed;
+
+ int j = 0;
+ arr[j++] = allocSize;
+ arr[j++] = allocUsed;
+ arr[j++] = allocFree;
+ arr[j++] = zygoteSize;
+ arr[j++] = zygoteUsed;
+ arr[j++] = zygoteFree;
+ arr[j++] = largeObjectsSize;
+ arr[j++] = largeObjectsUsed;
+ arr[j++] = largeObjectsFree;
+ env->ReleasePrimitiveArrayCritical(data, arr, 0);
+}
+
static JNINativeMethod gMethods[] = {
NATIVE_METHOD(VMDebug, countInstancesOfClass, "(Ljava/lang/Class;Z)J"),
NATIVE_METHOD(VMDebug, crash, "()V"),
@@ -241,6 +307,7 @@
NATIVE_METHOD(VMDebug, dumpHprofDataDdms, "()V"),
NATIVE_METHOD(VMDebug, dumpReferenceTables, "()V"),
NATIVE_METHOD(VMDebug, getAllocCount, "(I)I"),
+ NATIVE_METHOD(VMDebug, getHeapSpaceStats, "([J)V"),
NATIVE_METHOD(VMDebug, getInstructionCount, "([I)V"),
NATIVE_METHOD(VMDebug, getLoadedClassCount, "()I"),
NATIVE_METHOD(VMDebug, getVmFeatureList, "()[Ljava/lang/String;"),
diff --git a/runtime/oat/runtime/argument_visitor.h b/runtime/oat/runtime/argument_visitor.h
deleted file mode 100644
index aaf93f7..0000000
--- a/runtime/oat/runtime/argument_visitor.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_
-#define ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_
-
-#include "object_utils.h"
-
-namespace art {
-
-// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
-class PortableArgumentVisitor {
- public:
-// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
-// Size of Runtime::kRefAndArgs callee save frame.
-// Size of Method* and register parameters in out stack arguments.
-#if defined(__arm__)
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
-#define PORTABLE_STACK_ARG_SKIP 0
-#elif defined(__mips__)
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
-#define PORTABLE_STACK_ARG_SKIP 16
-#elif defined(__i386__)
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
-#define PORTABLE_STACK_ARG_SKIP 4
-#else
-#error "Unsupported architecture"
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
-#define PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
-#define PORTABLE_STACK_ARG_SKIP 0
-#endif
-
- PortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
- caller_mh_(caller_mh),
- args_in_regs_(ComputeArgsInRegs(caller_mh)),
- num_params_(caller_mh.NumArgs()),
- reg_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
- stack_args_(reinterpret_cast<byte*>(sp) + PORTABLE_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
- + PORTABLE_STACK_ARG_SKIP),
- cur_args_(reg_args_),
- cur_arg_index_(0),
- param_index_(0) {
- }
-
- virtual ~PortableArgumentVisitor() {}
-
- virtual void Visit() = 0;
-
- bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.IsParamAReference(param_index_);
- }
-
- bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.IsParamALongOrDouble(param_index_);
- }
-
- Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.GetParamPrimitiveType(param_index_);
- }
-
- byte* GetParamAddress() const {
- return cur_args_ + (cur_arg_index_ * kPointerSize);
- }
-
- void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
-#if (defined(__arm__) || defined(__mips__))
- if (IsParamALongOrDouble() && cur_arg_index_ == 2) {
- break;
- }
-#endif
- Visit();
- cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
- param_index_++;
- }
- cur_args_ = stack_args_;
- cur_arg_index_ = 0;
- while (param_index_ < num_params_) {
-#if (defined(__arm__) || defined(__mips__))
- if (IsParamALongOrDouble() && cur_arg_index_ % 2 != 0) {
- cur_arg_index_++;
- }
-#endif
- Visit();
- cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
- param_index_++;
- }
- }
-
- private:
- static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#if (defined(__i386__))
- return 0;
-#else
- size_t args_in_regs = 0;
- size_t num_params = mh.NumArgs();
- for (size_t i = 0; i < num_params; i++) {
- args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
- if (args_in_regs > 3) {
- args_in_regs = 3;
- break;
- }
- }
- return args_in_regs;
-#endif
- }
- MethodHelper& caller_mh_;
- const size_t args_in_regs_;
- const size_t num_params_;
- byte* const reg_args_;
- byte* const stack_args_;
- byte* cur_args_;
- size_t cur_arg_index_;
- size_t param_index_;
-};
-
-// Visits the arguments as saved to the stack by a Runtime::kRefAndArgs callee save frame.
-class QuickArgumentVisitor {
- public:
-// Offset to first (not the Method*) argument in a Runtime::kRefAndArgs callee save frame.
-// Size of Runtime::kRefAndArgs callee save frame.
-// Size of Method* and register parameters in out stack arguments.
-#if defined(__arm__)
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 8
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 48
-#define QUICK_STACK_ARG_SKIP 16
-#elif defined(__mips__)
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 64
-#define QUICK_STACK_ARG_SKIP 16
-#elif defined(__i386__)
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 4
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 32
-#define QUICK_STACK_ARG_SKIP 16
-#else
-#error "Unsupported architecture"
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET 0
-#define QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE 0
-#define QUICK_STACK_ARG_SKIP 0
-#endif
-
- QuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
- caller_mh_(caller_mh),
- args_in_regs_(ComputeArgsInRegs(caller_mh)),
- num_params_(caller_mh.NumArgs()),
- reg_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__R1_OFFSET),
- stack_args_(reinterpret_cast<byte*>(sp) + QUICK_CALLEE_SAVE_FRAME__REF_AND_ARGS__FRAME_SIZE
- + QUICK_STACK_ARG_SKIP),
- cur_args_(reg_args_),
- cur_arg_index_(0),
- param_index_(0),
- is_split_long_or_double_(false) {
- }
-
- virtual ~QuickArgumentVisitor() {}
-
- virtual void Visit() = 0;
-
- bool IsParamAReference() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.IsParamAReference(param_index_);
- }
-
- bool IsParamALongOrDouble() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.IsParamALongOrDouble(param_index_);
- }
-
- Primitive::Type GetParamPrimitiveType() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return caller_mh_.GetParamPrimitiveType(param_index_);
- }
-
- byte* GetParamAddress() const {
- return cur_args_ + (cur_arg_index_ * kPointerSize);
- }
-
- bool IsSplitLongOrDouble() const {
- return is_split_long_or_double_;
- }
-
- uint64_t ReadSplitLongParam() const {
- DCHECK(IsSplitLongOrDouble());
- uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
- uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
- return (low_half & 0xffffffffULL) | (high_half << 32);
- }
-
- void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- for (cur_arg_index_ = 0; cur_arg_index_ < args_in_regs_ && param_index_ < num_params_; ) {
- is_split_long_or_double_ = (cur_arg_index_ == 2) && IsParamALongOrDouble();
- Visit();
- cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
- param_index_++;
- }
- cur_args_ = stack_args_;
- cur_arg_index_ = is_split_long_or_double_ ? 1 : 0;
- is_split_long_or_double_ = false;
- while (param_index_ < num_params_) {
- Visit();
- cur_arg_index_ += (IsParamALongOrDouble() ? 2 : 1);
- param_index_++;
- }
- }
-
- private:
- static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- size_t args_in_regs = 0;
- size_t num_params = mh.NumArgs();
- for (size_t i = 0; i < num_params; i++) {
- args_in_regs = args_in_regs + (mh.IsParamALongOrDouble(i) ? 2 : 1);
- if (args_in_regs > 3) {
- args_in_regs = 3;
- break;
- }
- }
- return args_in_regs;
- }
- MethodHelper& caller_mh_;
- const size_t args_in_regs_;
- const size_t num_params_;
- byte* const reg_args_;
- byte* const stack_args_;
- byte* cur_args_;
- size_t cur_arg_index_;
- size_t param_index_;
- // Does a 64bit parameter straddle the register and stack arguments?
- bool is_split_long_or_double_;
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_
diff --git a/runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc b/runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc
deleted file mode 100644
index 2e9453c..0000000
--- a/runtime/oat/runtime/arm/oat_support_entrypoints_arm.cc
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "oat/runtime/oat_support_entrypoints.h"
-#include "runtime_support.h"
-
-namespace art {
-
-// Alloc entrypoints.
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-
-// Cast entrypoints.
-extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
-extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
-extern "C" void art_quick_check_cast_from_code(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
-extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
-
-// Exception entrypoints.
-extern "C" void* GetAndClearException(Thread*);
-
-// Field entrypoints.
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
-extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
-extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
-
-// FillArray entrypoint.
-extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object_from_code(void*);
-extern "C" void art_quick_unlock_object_from_code(void*);
-
-// Math entrypoints.
-extern int32_t CmpgDouble(double a, double b);
-extern int32_t CmplDouble(double a, double b);
-extern int32_t CmpgFloat(float a, float b);
-extern int32_t CmplFloat(float a, float b);
-
-// Math conversions.
-extern "C" int32_t __aeabi_f2iz(float op1); // FLOAT_TO_INT
-extern "C" int32_t __aeabi_d2iz(double op1); // DOUBLE_TO_INT
-extern "C" float __aeabi_l2f(int64_t op1); // LONG_TO_FLOAT
-extern "C" double __aeabi_l2d(int64_t op1); // LONG_TO_DOUBLE
-
-// Single-precision FP arithmetics.
-extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
-
-// Double-precision FP arithmetics.
-extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
-
-// Integer arithmetics.
-extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8|_LIT16]
-
-// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
-extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t);
-extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
-extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
-
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result);
-
-// Intrinsic entrypoints.
-extern "C" int32_t __memcmp16(void*, void*, int32_t);
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-
-// Invoke entrypoints.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
- mirror::Object* receiver,
- mirror::AbstractMethod** sp, Thread* thread);
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
- mirror::Object* receiver,
- mirror::AbstractMethod** sp, Thread* thread);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern void CheckSuspendFromCode(Thread* thread);
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero_from_code();
-extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception_from_code();
-extern "C" void art_quick_throw_stack_overflow_from_code(void*);
-
-void InitEntryPoints(EntryPoints* points) {
- // Alloc
- points->pAllocArrayFromCode = art_quick_alloc_array_from_code;
- points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
- points->pAllocObjectFromCode = art_quick_alloc_object_from_code;
- points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
- points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
- points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
-
- // Cast
- points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
- points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
- points->pCheckCastFromCode = art_quick_check_cast_from_code;
-
- // DexCache
- points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
- points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
- points->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
- points->pResolveStringFromCode = art_quick_resolve_string_from_code;
-
- // Field
- points->pSet32Instance = art_quick_set32_instance_from_code;
- points->pSet32Static = art_quick_set32_static_from_code;
- points->pSet64Instance = art_quick_set64_instance_from_code;
- points->pSet64Static = art_quick_set64_static_from_code;
- points->pSetObjInstance = art_quick_set_obj_instance_from_code;
- points->pSetObjStatic = art_quick_set_obj_static_from_code;
- points->pGet32Instance = art_quick_get32_instance_from_code;
- points->pGet64Instance = art_quick_get64_instance_from_code;
- points->pGetObjInstance = art_quick_get_obj_instance_from_code;
- points->pGet32Static = art_quick_get32_static_from_code;
- points->pGet64Static = art_quick_get64_static_from_code;
- points->pGetObjStatic = art_quick_get_obj_static_from_code;
-
- // FillArray
- points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
-
- // JNI
- points->pJniMethodStart = JniMethodStart;
- points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- points->pJniMethodEnd = JniMethodEnd;
- points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- points->pJniMethodEndWithReference = JniMethodEndWithReference;
- points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
-
- // Locks
- points->pLockObjectFromCode = art_quick_lock_object_from_code;
- points->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
-
- // Math
- points->pCmpgDouble = CmpgDouble;
- points->pCmpgFloat = CmpgFloat;
- points->pCmplDouble = CmplDouble;
- points->pCmplFloat = CmplFloat;
- points->pFmod = fmod;
- points->pSqrt = sqrt;
- points->pL2d = __aeabi_l2d;
- points->pFmodf = fmodf;
- points->pL2f = __aeabi_l2f;
- points->pD2iz = __aeabi_d2iz;
- points->pF2iz = __aeabi_f2iz;
- points->pIdivmod = __aeabi_idivmod;
- points->pD2l = art_d2l;
- points->pF2l = art_f2l;
- points->pLdiv = __aeabi_ldivmod;
- points->pLdivmod = __aeabi_ldivmod; // result returned in r2:r3
- points->pLmul = art_quick_mul_long;
- points->pShlLong = art_quick_shl_long;
- points->pShrLong = art_quick_shr_long;
- points->pUshrLong = art_quick_ushr_long;
-
- // Interpreter
- points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
- points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
-
- // Intrinsics
- points->pIndexOf = art_quick_indexof;
- points->pMemcmp16 = __memcmp16;
- points->pStringCompareTo = art_quick_string_compareto;
- points->pMemcpy = memcpy;
-
- // Invocation
- points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
- points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
- points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
- points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
-
- // Thread
- points->pCheckSuspendFromCode = CheckSuspendFromCode;
- points->pTestSuspendFromCode = art_quick_test_suspend;
-
- // Throws
- points->pDeliverException = art_quick_deliver_exception_from_code;
- points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
- points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
- points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
- points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
- points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
-};
-
-} // namespace art
diff --git a/runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc b/runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc
deleted file mode 100644
index 8e06611..0000000
--- a/runtime/oat/runtime/mips/oat_support_entrypoints_mips.cc
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runtime_support.h"
-#include "oat/runtime/oat_support_entrypoints.h"
-
-namespace art {
-
-// Alloc entrypoints.
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-
-// Cast entrypoints.
-extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass,
- const mirror::Class* ref_class);
-extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
-extern "C" void art_quick_check_cast_from_code(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
-extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
-
-// Exception entrypoints.
-extern "C" void* GetAndClearException(Thread*);
-
-// Field entrypoints.
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
-extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
-extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
-
-// FillArray entrypoint.
-extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object_from_code(void*);
-extern "C" void art_quick_unlock_object_from_code(void*);
-
-// Math entrypoints.
-extern int32_t CmpgDouble(double a, double b);
-extern int32_t CmplDouble(double a, double b);
-extern int32_t CmpgFloat(float a, float b);
-extern int32_t CmplFloat(float a, float b);
-extern "C" int64_t artLmulFromCode(int64_t a, int64_t b);
-extern "C" int64_t artLdivFromCode(int64_t a, int64_t b);
-extern "C" int64_t artLdivmodFromCode(int64_t a, int64_t b);
-
-// Math conversions.
-extern "C" int32_t __fixsfsi(float op1); // FLOAT_TO_INT
-extern "C" int32_t __fixdfsi(double op1); // DOUBLE_TO_INT
-extern "C" float __floatdisf(int64_t op1); // LONG_TO_FLOAT
-extern "C" double __floatdidf(int64_t op1); // LONG_TO_DOUBLE
-extern "C" int64_t __fixsfdi(float op1); // FLOAT_TO_LONG
-extern "C" int64_t __fixdfdi(double op1); // DOUBLE_TO_LONG
-
-// Single-precision FP arithmetics.
-extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
-
-// Double-precision FP arithmetics.
-extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
-
-// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
-extern "C" int64_t __divdi3(int64_t, int64_t);
-extern "C" int64_t __moddi3(int64_t, int64_t);
-extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
-
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result);
-
-// Intrinsic entrypoints.
-extern "C" int32_t __memcmp16(void*, void*, int32_t);
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-
-// Invoke entrypoints.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
- mirror::Object* receiver,
- mirror::AbstractMethod** sp, Thread* thread);
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
- mirror::Object* receiver,
- mirror::AbstractMethod** sp, Thread* thread);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern void CheckSuspendFromCode(Thread* thread);
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero_from_code();
-extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception_from_code();
-extern "C" void art_quick_throw_stack_overflow_from_code(void*);
-
-void InitEntryPoints(EntryPoints* points) {
- // Alloc
- points->pAllocArrayFromCode = art_quick_alloc_array_from_code;
- points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
- points->pAllocObjectFromCode = art_quick_alloc_object_from_code;
- points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
- points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
- points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
-
- // Cast
- points->pInstanceofNonTrivialFromCode = artIsAssignableFromCode;
- points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
- points->pCheckCastFromCode = art_quick_check_cast_from_code;
-
- // DexCache
- points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
- points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
- points->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
- points->pResolveStringFromCode = art_quick_resolve_string_from_code;
-
- // Field
- points->pSet32Instance = art_quick_set32_instance_from_code;
- points->pSet32Static = art_quick_set32_static_from_code;
- points->pSet64Instance = art_quick_set64_instance_from_code;
- points->pSet64Static = art_quick_set64_static_from_code;
- points->pSetObjInstance = art_quick_set_obj_instance_from_code;
- points->pSetObjStatic = art_quick_set_obj_static_from_code;
- points->pGet32Instance = art_quick_get32_instance_from_code;
- points->pGet64Instance = art_quick_get64_instance_from_code;
- points->pGetObjInstance = art_quick_get_obj_instance_from_code;
- points->pGet32Static = art_quick_get32_static_from_code;
- points->pGet64Static = art_quick_get64_static_from_code;
- points->pGetObjStatic = art_quick_get_obj_static_from_code;
-
- // FillArray
- points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
-
- // JNI
- points->pJniMethodStart = JniMethodStart;
- points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- points->pJniMethodEnd = JniMethodEnd;
- points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- points->pJniMethodEndWithReference = JniMethodEndWithReference;
- points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
-
- // Locks
- points->pLockObjectFromCode = art_quick_lock_object_from_code;
- points->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
-
- // Math
- points->pCmpgDouble = CmpgDouble;
- points->pCmpgFloat = CmpgFloat;
- points->pCmplDouble = CmplDouble;
- points->pCmplFloat = CmplFloat;
- points->pFmod = fmod;
- points->pL2d = __floatdidf;
- points->pFmodf = fmodf;
- points->pL2f = __floatdisf;
- points->pD2iz = __fixdfsi;
- points->pF2iz = __fixsfsi;
- points->pIdivmod = NULL;
- points->pD2l = art_d2l;
- points->pF2l = art_f2l;
- points->pLdiv = artLdivFromCode;
- points->pLdivmod = artLdivmodFromCode;
- points->pLmul = artLmulFromCode;
- points->pShlLong = art_quick_shl_long;
- points->pShrLong = art_quick_shr_long;
- points->pUshrLong = art_quick_ushr_long;
-
- // Interpreter
- points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
- points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
-
- // Intrinsics
- points->pIndexOf = art_quick_indexof;
- points->pMemcmp16 = __memcmp16;
- points->pStringCompareTo = art_quick_string_compareto;
- points->pMemcpy = memcpy;
-
- // Invocation
- points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
- points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
- points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
- points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
-
- // Thread
- points->pCheckSuspendFromCode = CheckSuspendFromCode;
- points->pTestSuspendFromCode = art_quick_test_suspend;
-
- // Throws
- points->pDeliverException = art_quick_deliver_exception_from_code;
- points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
- points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
- points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
- points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
- points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
-};
-
-} // namespace art
diff --git a/runtime/oat/runtime/support_proxy.cc b/runtime/oat/runtime/support_proxy.cc
deleted file mode 100644
index d4d0ca1..0000000
--- a/runtime/oat/runtime/support_proxy.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "argument_visitor.h"
-#include "dex_file-inl.h"
-#include "mirror/abstract_method-inl.h"
-#include "mirror/object_array-inl.h"
-#include "mirror/object-inl.h"
-#include "object_utils.h"
-#include "reflection.h"
-#include "runtime_support.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-#include "well_known_classes.h"
-
-#include "ScopedLocalRef.h"
-
-namespace art {
-
-// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
-// to jobjects.
-class BuildPortableArgumentVisitor : public PortableArgumentVisitor {
- public:
- BuildPortableArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
- ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
- PortableArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
-
- virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- jvalue val;
- Primitive::Type type = GetParamPrimitiveType();
- switch (type) {
- case Primitive::kPrimNot: {
- mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
- val.l = soa_.AddLocalReference<jobject>(obj);
- break;
- }
- case Primitive::kPrimLong: // Fall-through.
- case Primitive::kPrimDouble:
- val.j = *reinterpret_cast<jlong*>(GetParamAddress());
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- case Primitive::kPrimFloat:
- val.i = *reinterpret_cast<jint*>(GetParamAddress());
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- val.j = 0;
- break;
- }
- args_.push_back(val);
- }
-
- private:
- ScopedObjectAccessUnchecked& soa_;
- std::vector<jvalue>& args_;
-
- DISALLOW_COPY_AND_ASSIGN(BuildPortableArgumentVisitor);
-};
-
-// Visits arguments on the stack placing them into the args vector, Object* arguments are converted
-// to jobjects.
-class BuildQuickArgumentVisitor : public QuickArgumentVisitor {
- public:
- BuildQuickArgumentVisitor(MethodHelper& caller_mh, mirror::AbstractMethod** sp,
- ScopedObjectAccessUnchecked& soa, std::vector<jvalue>& args) :
- QuickArgumentVisitor(caller_mh, sp), soa_(soa), args_(args) {}
-
- virtual void Visit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- jvalue val;
- Primitive::Type type = GetParamPrimitiveType();
- switch (type) {
- case Primitive::kPrimNot: {
- mirror::Object* obj = *reinterpret_cast<mirror::Object**>(GetParamAddress());
- val.l = soa_.AddLocalReference<jobject>(obj);
- break;
- }
- case Primitive::kPrimLong: // Fall-through.
- case Primitive::kPrimDouble:
- if (IsSplitLongOrDouble()) {
- val.j = ReadSplitLongParam();
- } else {
- val.j = *reinterpret_cast<jlong*>(GetParamAddress());
- }
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- case Primitive::kPrimFloat:
- val.i = *reinterpret_cast<jint*>(GetParamAddress());
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- val.j = 0;
- break;
- }
- args_.push_back(val);
- }
-
- private:
- ScopedObjectAccessUnchecked& soa_;
- std::vector<jvalue>& args_;
-
- DISALLOW_COPY_AND_ASSIGN(BuildQuickArgumentVisitor);
-};
-
-// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
-// which is responsible for recording callee save registers. We explicitly place into jobjects the
-// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
-// field within the proxy object, which will box the primitive arguments and deal with error cases.
-extern "C" uint64_t artPortableProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
- mirror::Object* receiver,
- Thread* self, mirror::AbstractMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
- const char* old_cause =
- self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
- self->VerifyStack();
- // Start new JNI local reference state.
- JNIEnvExt* env = self->GetJniEnv();
- ScopedObjectAccessUnchecked soa(env);
- ScopedJniEnvLocalRefState env_state(env);
- // Create local ref. copies of proxy method and the receiver.
- jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
-
- // Placing arguments into args vector and remove the receiver.
- MethodHelper proxy_mh(proxy_method);
- std::vector<jvalue> args;
- BuildPortableArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
- local_ref_visitor.VisitArguments();
- args.erase(args.begin());
-
- // Convert proxy method into expected interface method.
- mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
- DCHECK(interface_method != NULL);
- DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
- jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
-
- // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
- // that performs allocations.
- self->EndAssertNoThreadSuspension(old_cause);
- JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
- rcvr_jobj, interface_method_jobj, args);
- return result.GetJ();
-}
-
-// Handler for invocation on proxy methods. On entry a frame will exist for the proxy object method
-// which is responsible for recording callee save registers. We explicitly place into jobjects the
-// incoming reference arguments (so they survive GC). We invoke the invocation handler, which is a
-// field within the proxy object, which will box the primitive arguments and deal with error cases.
-extern "C" uint64_t artQuickProxyInvokeHandler(mirror::AbstractMethod* proxy_method,
- mirror::Object* receiver,
- Thread* self, mirror::AbstractMethod** sp)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
- const char* old_cause =
- self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
- // Register the top of the managed stack, making stack crawlable.
- DCHECK_EQ(*sp, proxy_method);
- self->SetTopOfStack(sp, 0);
- DCHECK_EQ(proxy_method->GetFrameSizeInBytes(),
- Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetFrameSizeInBytes());
- self->VerifyStack();
- // Start new JNI local reference state.
- JNIEnvExt* env = self->GetJniEnv();
- ScopedObjectAccessUnchecked soa(env);
- ScopedJniEnvLocalRefState env_state(env);
- // Create local ref. copies of proxy method and the receiver.
- jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
-
- // Placing arguments into args vector and remove the receiver.
- MethodHelper proxy_mh(proxy_method);
- std::vector<jvalue> args;
- BuildQuickArgumentVisitor local_ref_visitor(proxy_mh, sp, soa, args);
- local_ref_visitor.VisitArguments();
- args.erase(args.begin());
-
- // Convert proxy method into expected interface method.
- mirror::AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
- DCHECK(interface_method != NULL);
- DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
- jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
-
- // All naked Object*s should now be in jobjects, so its safe to go into the main invoke code
- // that performs allocations.
- self->EndAssertNoThreadSuspension(old_cause);
- JValue result = InvokeProxyInvocationHandler(soa, proxy_mh.GetShorty(),
- rcvr_jobj, interface_method_jobj, args);
- return result.GetJ();
-}
-
-} // namespace art
diff --git a/runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc b/runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc
deleted file mode 100644
index 7dfb07c..0000000
--- a/runtime/oat/runtime/x86/oat_support_entrypoints_x86.cc
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "oat/runtime/oat_support_entrypoints.h"
-#include "runtime_support.h"
-
-namespace art {
-
-// Alloc entrypoints.
-extern "C" void* art_quick_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-extern "C" void* art_quick_alloc_object_from_code(uint32_t type_idx, void* method);
-extern "C" void* art_quick_alloc_object_from_code_with_access_check(uint32_t type_idx, void* method);
-extern "C" void* art_quick_check_and_alloc_array_from_code(uint32_t, void*, int32_t);
-extern "C" void* art_quick_check_and_alloc_array_from_code_with_access_check(uint32_t, void*, int32_t);
-
-// Cast entrypoints.
-extern "C" uint32_t art_quick_is_assignable_from_code(const mirror::Class* klass,
- const mirror::Class* ref_class);
-extern "C" void art_quick_can_put_array_element_from_code(void*, void*);
-extern "C" void art_quick_check_cast_from_code(void*, void*);
-
-// DexCache entrypoints.
-extern "C" void* art_quick_initialize_static_storage_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_from_code(uint32_t, void*);
-extern "C" void* art_quick_initialize_type_and_verify_access_from_code(uint32_t, void*);
-extern "C" void* art_quick_resolve_string_from_code(void*, uint32_t);
-
-// Field entrypoints.
-extern "C" int art_quick_set32_instance_from_code(uint32_t, void*, int32_t);
-extern "C" int art_quick_set32_static_from_code(uint32_t, int32_t);
-extern "C" int art_quick_set64_instance_from_code(uint32_t, void*, int64_t);
-extern "C" int art_quick_set64_static_from_code(uint32_t, int64_t);
-extern "C" int art_quick_set_obj_instance_from_code(uint32_t, void*, void*);
-extern "C" int art_quick_set_obj_static_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_instance_from_code(uint32_t, void*);
-extern "C" int32_t art_quick_get32_static_from_code(uint32_t);
-extern "C" int64_t art_quick_get64_instance_from_code(uint32_t, void*);
-extern "C" int64_t art_quick_get64_static_from_code(uint32_t);
-extern "C" void* art_quick_get_obj_instance_from_code(uint32_t, void*);
-extern "C" void* art_quick_get_obj_static_from_code(uint32_t);
-
-// FillArray entrypoint.
-extern "C" void art_quick_handle_fill_data_from_code(void*, void*);
-
-// Lock entrypoints.
-extern "C" void art_quick_lock_object_from_code(void*);
-extern "C" void art_quick_unlock_object_from_code(void*);
-
-// Math entrypoints.
-extern "C" double art_quick_fmod_from_code(double, double);
-extern "C" float art_quick_fmodf_from_code(float, float);
-extern "C" double art_quick_l2d_from_code(int64_t);
-extern "C" float art_quick_l2f_from_code(int64_t);
-extern "C" int64_t art_quick_d2l_from_code(double);
-extern "C" int64_t art_quick_f2l_from_code(float);
-extern "C" int32_t art_quick_idivmod_from_code(int32_t, int32_t);
-extern "C" int64_t art_quick_ldiv_from_code(int64_t, int64_t);
-extern "C" int64_t art_quick_ldivmod_from_code(int64_t, int64_t);
-extern "C" int64_t art_quick_lmul_from_code(int64_t, int64_t);
-extern "C" uint64_t art_quick_lshl_from_code(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lshr_from_code(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_lushr_from_code(uint64_t, uint32_t);
-
-// Interpreter entrypoints.
-extern "C" void artInterpreterToInterpreterEntry(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result);
-extern "C" void artInterpreterToQuickEntry(Thread* self, MethodHelper& mh,
- const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result);
-
-// Intrinsic entrypoints.
-extern "C" int32_t art_quick_memcmp16(void*, void*, int32_t);
-extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
-extern "C" int32_t art_quick_string_compareto(void*, void*);
-extern "C" void* art_quick_memcpy(void*, const void*, size_t);
-
-// Invoke entrypoints.
-extern "C" const void* artPortableResolutionTrampoline(mirror::AbstractMethod* called,
- mirror::Object* receiver,
- mirror::AbstractMethod** sp, Thread* thread);
-extern "C" const void* artQuickResolutionTrampoline(mirror::AbstractMethod* called,
- mirror::Object* receiver,
- mirror::AbstractMethod** sp, Thread* thread);
-extern "C" void art_quick_invoke_direct_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline(uint32_t, void*);
-extern "C" void art_quick_invoke_interface_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_static_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_super_trampoline_with_access_check(uint32_t, void*);
-extern "C" void art_quick_invoke_virtual_trampoline_with_access_check(uint32_t, void*);
-
-// Thread entrypoints.
-extern void CheckSuspendFromCode(Thread* thread);
-extern "C" void art_quick_test_suspend();
-
-// Throw entrypoints.
-extern "C" void art_quick_deliver_exception_from_code(void*);
-extern "C" void art_quick_throw_array_bounds_from_code(int32_t index, int32_t limit);
-extern "C" void art_quick_throw_div_zero_from_code();
-extern "C" void art_quick_throw_no_such_method_from_code(int32_t method_idx);
-extern "C" void art_quick_throw_null_pointer_exception_from_code();
-extern "C" void art_quick_throw_stack_overflow_from_code(void*);
-
-void InitEntryPoints(EntryPoints* points) {
- // Alloc
- points->pAllocArrayFromCode = art_quick_alloc_array_from_code;
- points->pAllocArrayFromCodeWithAccessCheck = art_quick_alloc_array_from_code_with_access_check;
- points->pAllocObjectFromCode = art_quick_alloc_object_from_code;
- points->pAllocObjectFromCodeWithAccessCheck = art_quick_alloc_object_from_code_with_access_check;
- points->pCheckAndAllocArrayFromCode = art_quick_check_and_alloc_array_from_code;
- points->pCheckAndAllocArrayFromCodeWithAccessCheck = art_quick_check_and_alloc_array_from_code_with_access_check;
-
- // Cast
- points->pInstanceofNonTrivialFromCode = art_quick_is_assignable_from_code;
- points->pCanPutArrayElementFromCode = art_quick_can_put_array_element_from_code;
- points->pCheckCastFromCode = art_quick_check_cast_from_code;
-
- // DexCache
- points->pInitializeStaticStorage = art_quick_initialize_static_storage_from_code;
- points->pInitializeTypeAndVerifyAccessFromCode = art_quick_initialize_type_and_verify_access_from_code;
- points->pInitializeTypeFromCode = art_quick_initialize_type_from_code;
- points->pResolveStringFromCode = art_quick_resolve_string_from_code;
-
- // Field
- points->pSet32Instance = art_quick_set32_instance_from_code;
- points->pSet32Static = art_quick_set32_static_from_code;
- points->pSet64Instance = art_quick_set64_instance_from_code;
- points->pSet64Static = art_quick_set64_static_from_code;
- points->pSetObjInstance = art_quick_set_obj_instance_from_code;
- points->pSetObjStatic = art_quick_set_obj_static_from_code;
- points->pGet32Instance = art_quick_get32_instance_from_code;
- points->pGet64Instance = art_quick_get64_instance_from_code;
- points->pGetObjInstance = art_quick_get_obj_instance_from_code;
- points->pGet32Static = art_quick_get32_static_from_code;
- points->pGet64Static = art_quick_get64_static_from_code;
- points->pGetObjStatic = art_quick_get_obj_static_from_code;
-
- // FillArray
- points->pHandleFillArrayDataFromCode = art_quick_handle_fill_data_from_code;
-
- // JNI
- points->pJniMethodStart = JniMethodStart;
- points->pJniMethodStartSynchronized = JniMethodStartSynchronized;
- points->pJniMethodEnd = JniMethodEnd;
- points->pJniMethodEndSynchronized = JniMethodEndSynchronized;
- points->pJniMethodEndWithReference = JniMethodEndWithReference;
- points->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized;
-
- // Locks
- points->pLockObjectFromCode = art_quick_lock_object_from_code;
- points->pUnlockObjectFromCode = art_quick_unlock_object_from_code;
-
- // Math
- // points->pCmpgDouble = NULL; // Not needed on x86.
- // points->pCmpgFloat = NULL; // Not needed on x86.
- // points->pCmplDouble = NULL; // Not needed on x86.
- // points->pCmplFloat = NULL; // Not needed on x86.
- points->pFmod = art_quick_fmod_from_code;
- points->pL2d = art_quick_l2d_from_code;
- points->pFmodf = art_quick_fmodf_from_code;
- points->pL2f = art_quick_l2f_from_code;
- // points->pD2iz = NULL; // Not needed on x86.
- // points->pF2iz = NULL; // Not needed on x86.
- points->pIdivmod = art_quick_idivmod_from_code;
- points->pD2l = art_quick_d2l_from_code;
- points->pF2l = art_quick_f2l_from_code;
- points->pLdiv = art_quick_ldiv_from_code;
- points->pLdivmod = art_quick_ldivmod_from_code;
- points->pLmul = art_quick_lmul_from_code;
- points->pShlLong = art_quick_lshl_from_code;
- points->pShrLong = art_quick_lshr_from_code;
- points->pUshrLong = art_quick_lushr_from_code;
-
- // Interpreter
- points->pInterpreterToInterpreterEntry = artInterpreterToInterpreterEntry;
- points->pInterpreterToQuickEntry = artInterpreterToQuickEntry;
-
- // Intrinsics
- points->pIndexOf = art_quick_indexof;
- points->pMemcmp16 = art_quick_memcmp16;
- points->pStringCompareTo = art_quick_string_compareto;
- points->pMemcpy = art_quick_memcpy;
-
- // Invocation
- points->pPortableResolutionTrampolineFromCode = artPortableResolutionTrampoline;
- points->pQuickResolutionTrampolineFromCode = artQuickResolutionTrampoline;
- points->pInvokeDirectTrampolineWithAccessCheck = art_quick_invoke_direct_trampoline_with_access_check;
- points->pInvokeInterfaceTrampoline = art_quick_invoke_interface_trampoline;
- points->pInvokeInterfaceTrampolineWithAccessCheck = art_quick_invoke_interface_trampoline_with_access_check;
- points->pInvokeStaticTrampolineWithAccessCheck = art_quick_invoke_static_trampoline_with_access_check;
- points->pInvokeSuperTrampolineWithAccessCheck = art_quick_invoke_super_trampoline_with_access_check;
- points->pInvokeVirtualTrampolineWithAccessCheck = art_quick_invoke_virtual_trampoline_with_access_check;
-
- // Thread
- points->pCheckSuspendFromCode = CheckSuspendFromCode;
- points->pTestSuspendFromCode = art_quick_test_suspend;
-
- // Throws
- points->pDeliverException = art_quick_deliver_exception_from_code;
- points->pThrowArrayBoundsFromCode = art_quick_throw_array_bounds_from_code;
- points->pThrowDivZeroFromCode = art_quick_throw_div_zero_from_code;
- points->pThrowNoSuchMethodFromCode = art_quick_throw_no_such_method_from_code;
- points->pThrowNullPointerFromCode = art_quick_throw_null_pointer_exception_from_code;
- points->pThrowStackOverflowFromCode = art_quick_throw_stack_overflow_from_code;
-};
-
-} // namespace art
diff --git a/runtime/oat_test.cc b/runtime/oat_test.cc
index ebb228e..5d0dca9 100644
--- a/runtime/oat_test.cc
+++ b/runtime/oat_test.cc
@@ -74,10 +74,11 @@
#else
CompilerBackend compiler_backend = kQuick;
#endif
- compiler_driver_.reset(new CompilerDriver(compiler_backend, kThumb2, false, NULL, 2, true));
+ InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86;
+ compiler_driver_.reset(new CompilerDriver(compiler_backend, insn_set, false, NULL, 2, true));
jobject class_loader = NULL;
if (compile) {
- TimingLogger timings("OatTest::WriteRead", false);
+ base::TimingLogger timings("OatTest::WriteRead", false, false);
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
}
@@ -96,7 +97,7 @@
ASSERT_TRUE(success);
if (compile) { // OatWriter strips the code, regenerate to compare
- TimingLogger timings("CommonTest::WriteRead", false);
+ base::TimingLogger timings("CommonTest::WriteRead", false, false);
compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), timings);
}
UniquePtr<OatFile> oat_file(OatFile::Open(tmp.GetFilename(), tmp.GetFilename(), NULL, false));
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 0c13ad2..485c636 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -28,11 +28,11 @@
#include <limits>
#include <vector>
+#include "arch/arm/registers_arm.h"
+#include "arch/mips/registers_mips.h"
+#include "arch/x86/registers_x86.h"
#include "atomic.h"
#include "class_linker.h"
-#include "constants_arm.h"
-#include "constants_mips.h"
-#include "constants_x86.h"
#include "debugger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
@@ -134,10 +134,10 @@
delete java_vm_;
Thread::Shutdown();
QuasiAtomic::Shutdown();
+ verifier::MethodVerifier::Shutdown();
// TODO: acquire a static mutex on Runtime to avoid racing.
CHECK(instance_ == NULL || instance_ == this);
instance_ = NULL;
- verifier::MethodVerifier::Shutdown();
}
struct AbortState {
diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc
deleted file mode 100644
index 9d83f9e..0000000
--- a/runtime/runtime_support_llvm.cc
+++ /dev/null
@@ -1,925 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runtime_support_llvm.h"
-
-#include "ScopedLocalRef.h"
-#include "asm_support.h"
-#include "class_linker.h"
-#include "class_linker-inl.h"
-#include "dex_file-inl.h"
-#include "dex_instruction.h"
-#include "mirror/abstract_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/dex_cache-inl.h"
-#include "mirror/field-inl.h"
-#include "mirror/object.h"
-#include "mirror/object-inl.h"
-#include "mirror/object_array-inl.h"
-#include "nth_caller_visitor.h"
-#include "object_utils.h"
-#include "reflection.h"
-#include "runtime_support.h"
-#include "runtime_support_llvm_func_list.h"
-#include "scoped_thread_state_change.h"
-#include "thread.h"
-#include "thread_list.h"
-#include "verifier/dex_gc_map.h"
-#include "verifier/method_verifier.h"
-#include "well_known_classes.h"
-
-#include <algorithm>
-#include <math.h>
-#include <stdarg.h>
-#include <stdint.h>
-#include <stdlib.h>
-
-namespace art {
-
-using ::art::mirror::AbstractMethod;
-
-class ShadowFrameCopyVisitor : public StackVisitor {
- public:
- explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL),
- top_frame_(NULL) {}
-
- bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (IsShadowFrame()) {
- ShadowFrame* cur_frame = GetCurrentShadowFrame();
- size_t num_regs = cur_frame->NumberOfVRegs();
- AbstractMethod* method = cur_frame->GetMethod();
- uint32_t dex_pc = cur_frame->GetDexPC();
- ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, method, dex_pc);
-
- const uint8_t* gc_map = method->GetNativeGcMap();
- uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
- (gc_map[1] << 16) |
- (gc_map[2] << 8) |
- (gc_map[3] << 0));
- verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
- const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
- for (size_t reg = 0; reg < num_regs; ++reg) {
- if (TestBitmap(reg, reg_bitmap)) {
- new_frame->SetVRegReference(reg, cur_frame->GetVRegReference(reg));
- } else {
- new_frame->SetVReg(reg, cur_frame->GetVReg(reg));
- }
- }
-
- if (prev_frame_ != NULL) {
- prev_frame_->SetLink(new_frame);
- } else {
- top_frame_ = new_frame;
- }
- prev_frame_ = new_frame;
- }
- return true;
- }
-
- ShadowFrame* GetShadowFrameCopy() {
- return top_frame_;
- }
-
- private:
- static bool TestBitmap(int reg, const uint8_t* reg_vector) {
- return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
- }
-
- ShadowFrame* prev_frame_;
- ShadowFrame* top_frame_;
-};
-
-} // namespace art
-
-extern "C" {
-using ::art::CatchHandlerIterator;
-using ::art::DexFile;
-using ::art::FindFieldFast;
-using ::art::FindMethodFast;
-using ::art::InstanceObjectRead;
-using ::art::InstanceObjectWrite;
-using ::art::InstancePrimitiveRead;
-using ::art::InstancePrimitiveWrite;
-using ::art::Instruction;
-using ::art::InvokeType;
-using ::art::JNIEnvExt;
-using ::art::JValue;
-using ::art::Locks;
-using ::art::MethodHelper;
-using ::art::PrettyClass;
-using ::art::PrettyMethod;
-using ::art::Primitive;
-using ::art::ResolveStringFromCode;
-using ::art::Runtime;
-using ::art::ScopedJniEnvLocalRefState;
-using ::art::ScopedObjectAccessUnchecked;
-using ::art::ShadowFrame;
-using ::art::ShadowFrameCopyVisitor;
-using ::art::StaticObjectRead;
-using ::art::StaticObjectWrite;
-using ::art::StaticPrimitiveRead;
-using ::art::StaticPrimitiveWrite;
-using ::art::Thread;
-using ::art::Thread;
-using ::art::ThrowArithmeticExceptionDivideByZero;
-using ::art::ThrowArrayIndexOutOfBoundsException;
-using ::art::ThrowArrayStoreException;
-using ::art::ThrowClassCastException;
-using ::art::ThrowLocation;
-using ::art::ThrowNoSuchMethodError;
-using ::art::ThrowNullPointerException;
-using ::art::ThrowNullPointerExceptionFromDexPC;
-using ::art::ThrowStackOverflowError;
-using ::art::kDirect;
-using ::art::kInterface;
-using ::art::kNative;
-using ::art::kStatic;
-using ::art::kSuper;
-using ::art::kVirtual;
-using ::art::mirror::AbstractMethod;
-using ::art::mirror::Array;
-using ::art::mirror::Class;
-using ::art::mirror::Field;
-using ::art::mirror::Object;
-using ::art::mirror::Throwable;
-
-//----------------------------------------------------------------------------
-// Thread
-//----------------------------------------------------------------------------
-
-Thread* art_portable_get_current_thread_from_code() {
-#if defined(__arm__) || defined(__i386__)
- LOG(FATAL) << "UNREACHABLE";
-#endif
- return Thread::Current();
-}
-
-void* art_portable_set_current_thread_from_code(void* thread_object_addr) {
- // Hijacked to set r9 on ARM.
- LOG(FATAL) << "UNREACHABLE";
- return NULL;
-}
-
-void art_portable_lock_object_from_code(Object* obj, Thread* thread)
- EXCLUSIVE_LOCK_FUNCTION(monitor_lock_) {
- DCHECK(obj != NULL); // Assumed to have been checked before entry
- obj->MonitorEnter(thread); // May block
- DCHECK(thread->HoldsLock(obj));
- // Only possible exception is NPE and is handled before entry
- DCHECK(!thread->IsExceptionPending());
-}
-
-void art_portable_unlock_object_from_code(Object* obj, Thread* thread)
- UNLOCK_FUNCTION(monitor_lock_) {
- DCHECK(obj != NULL); // Assumed to have been checked before entry
- // MonitorExit may throw exception
- obj->MonitorExit(thread);
-}
-
-void art_portable_test_suspend_from_code(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CheckSuspend(self);
- if (Runtime::Current()->GetInstrumentation()->ShouldPortableCodeDeoptimize()) {
- // Save out the shadow frame to the heap
- ShadowFrameCopyVisitor visitor(self);
- visitor.WalkStack(true);
- self->SetDeoptimizationShadowFrame(visitor.GetShadowFrameCopy());
- self->SetDeoptimizationReturnValue(JValue());
- self->SetException(ThrowLocation(), reinterpret_cast<Throwable*>(-1));
- }
-}
-
-ShadowFrame* art_portable_push_shadow_frame_from_code(Thread* thread,
- ShadowFrame* new_shadow_frame,
- AbstractMethod* method,
- uint32_t num_vregs) {
- ShadowFrame* old_frame = thread->PushShadowFrame(new_shadow_frame);
- new_shadow_frame->SetMethod(method);
- new_shadow_frame->SetNumberOfVRegs(num_vregs);
- return old_frame;
-}
-
-void art_portable_pop_shadow_frame_from_code(void*) {
- LOG(FATAL) << "Implemented by IRBuilder.";
-}
-
-void art_portable_mark_gc_card_from_code(void *, void*) {
- LOG(FATAL) << "Implemented by IRBuilder.";
-}
-
-//----------------------------------------------------------------------------
-// Exception
-//----------------------------------------------------------------------------
-
-bool art_portable_is_exception_pending_from_code() {
- LOG(FATAL) << "Implemented by IRBuilder.";
- return false;
-}
-
-void art_portable_throw_div_zero_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowArithmeticExceptionDivideByZero();
-}
-
-void art_portable_throw_array_bounds_from_code(int32_t index, int32_t length)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowArrayIndexOutOfBoundsException(index, length);
-}
-
-void art_portable_throw_no_such_method_from_code(int32_t method_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowNoSuchMethodError(method_idx);
-}
-
-void art_portable_throw_null_pointer_exception_from_code(uint32_t dex_pc)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // TODO: remove dex_pc argument from caller.
- UNUSED(dex_pc);
- Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionFromDexPC(throw_location);
-}
-
-void art_portable_throw_stack_overflow_from_code() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowStackOverflowError(Thread::Current());
-}
-
-void art_portable_throw_exception_from_code(Throwable* exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- if (exception == NULL) {
- ThrowNullPointerException(NULL, "throw with null exception");
- } else {
- self->SetException(throw_location, exception);
- }
-}
-
-void* art_portable_get_and_clear_exception(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(self->IsExceptionPending());
- // TODO: make this inline.
- Throwable* exception = self->GetException(NULL);
- self->ClearException();
- return exception;
-}
-
-int32_t art_portable_find_catch_block_from_code(AbstractMethod* current_method,
- uint32_t ti_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Thread* self = Thread::Current(); // TODO: make an argument.
- ThrowLocation throw_location;
- Throwable* exception = self->GetException(&throw_location);
- // Check for special deoptimization exception.
- if (UNLIKELY(reinterpret_cast<int32_t>(exception) == -1)) {
- return -1;
- }
- Class* exception_type = exception->GetClass();
- MethodHelper mh(current_method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
- DCHECK_LT(ti_offset, code_item->tries_size_);
- const DexFile::TryItem* try_item = DexFile::GetTryItems(*code_item, ti_offset);
-
- int iter_index = 0;
- int result = -1;
- uint32_t catch_dex_pc = -1;
- // Iterate over the catch handlers associated with dex_pc
- for (CatchHandlerIterator it(*code_item, *try_item); it.HasNext(); it.Next()) {
- uint16_t iter_type_idx = it.GetHandlerTypeIndex();
- // Catch all case
- if (iter_type_idx == DexFile::kDexNoIndex16) {
- catch_dex_pc = it.GetHandlerAddress();
- result = iter_index;
- break;
- }
- // Does this catch exception type apply?
- Class* iter_exception_type = mh.GetDexCacheResolvedType(iter_type_idx);
- if (UNLIKELY(iter_exception_type == NULL)) {
- // TODO: check, the verifier (class linker?) should take care of resolving all exception
- // classes early.
- LOG(WARNING) << "Unresolved exception class when finding catch block: "
- << mh.GetTypeDescriptorFromTypeIdx(iter_type_idx);
- } else if (iter_exception_type->IsAssignableFrom(exception_type)) {
- catch_dex_pc = it.GetHandlerAddress();
- result = iter_index;
- break;
- }
- ++iter_index;
- }
- if (result != -1) {
- // Handler found.
- Runtime::Current()->GetInstrumentation()->ExceptionCaughtEvent(self,
- throw_location,
- current_method,
- catch_dex_pc,
- exception);
- }
- return result;
-}
-
-
-//----------------------------------------------------------------------------
-// Object Space
-//----------------------------------------------------------------------------
-
-Object* art_portable_alloc_object_from_code(uint32_t type_idx, AbstractMethod* referrer, Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocObjectFromCode(type_idx, referrer, thread, false);
-}
-
-Object* art_portable_alloc_object_from_code_with_access_check(uint32_t type_idx,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocObjectFromCode(type_idx, referrer, thread, true);
-}
-
-Object* art_portable_alloc_array_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- uint32_t length,
- Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocArrayFromCode(type_idx, referrer, length, self, false);
-}
-
-Object* art_portable_alloc_array_from_code_with_access_check(uint32_t type_idx,
- AbstractMethod* referrer,
- uint32_t length,
- Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocArrayFromCode(type_idx, referrer, length, self, true);
-}
-
-Object* art_portable_check_and_alloc_array_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- uint32_t length,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, false);
-}
-
-Object* art_portable_check_and_alloc_array_from_code_with_access_check(uint32_t type_idx,
- AbstractMethod* referrer,
- uint32_t length,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return CheckAndAllocArrayFromCode(type_idx, referrer, length, thread, true);
-}
-
-static AbstractMethod* FindMethodHelper(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* caller_method,
- bool access_check,
- InvokeType type,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- AbstractMethod* method = FindMethodFast(method_idx,
- this_object,
- caller_method,
- access_check,
- type);
- if (UNLIKELY(method == NULL)) {
- method = FindMethodFromCode(method_idx, this_object, caller_method,
- thread, access_check, type);
- if (UNLIKELY(method == NULL)) {
- CHECK(thread->IsExceptionPending());
- return 0; // failure
- }
- }
- DCHECK(!thread->IsExceptionPending());
- const void* code = method->GetEntryPointFromCompiledCode();
-
- // When we return, the caller will branch to this address, so it had better not be 0!
- if (UNLIKELY(code == NULL)) {
- MethodHelper mh(method);
- LOG(FATAL) << "Code was NULL in method: " << PrettyMethod(method)
- << " location: " << mh.GetDexFile().GetLocation();
- }
- return method;
-}
-
-Object* art_portable_find_static_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kStatic, thread);
-}
-
-Object* art_portable_find_direct_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kDirect, thread);
-}
-
-Object* art_portable_find_virtual_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kVirtual, thread);
-}
-
-Object* art_portable_find_super_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kSuper, thread);
-}
-
-Object* art_portable_find_interface_method_from_code_with_access_check(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, true, kInterface, thread);
-}
-
-Object* art_portable_find_interface_method_from_code(uint32_t method_idx,
- Object* this_object,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return FindMethodHelper(method_idx, this_object, referrer, false, kInterface, thread);
-}
-
-Object* art_portable_initialize_static_storage_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveVerifyAndClinit(type_idx, referrer, thread, true, false);
-}
-
-Object* art_portable_initialize_type_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveVerifyAndClinit(type_idx, referrer, thread, false, false);
-}
-
-Object* art_portable_initialize_type_and_verify_access_from_code(uint32_t type_idx,
- AbstractMethod* referrer,
- Thread* thread)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Called when caller isn't guaranteed to have access to a type and the dex cache may be
- // unpopulated
- return ResolveVerifyAndClinit(type_idx, referrer, thread, false, true);
-}
-
-Object* art_portable_resolve_string_from_code(AbstractMethod* referrer, uint32_t string_idx)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return ResolveStringFromCode(referrer, string_idx);
-}
-
-int32_t art_portable_set32_static_from_code(uint32_t field_idx,
- AbstractMethod* referrer,
- int32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx,
- referrer,
- StaticPrimitiveWrite,
- sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- field->Set32(field->GetDeclaringClass(), new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx,
- referrer,
- Thread::Current(),
- StaticPrimitiveWrite,
- sizeof(uint32_t),
- true);
- if (LIKELY(field != NULL)) {
- field->Set32(field->GetDeclaringClass(), new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_set64_static_from_code(uint32_t field_idx,
- AbstractMethod* referrer,
- int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- field->Set64(field->GetDeclaringClass(), new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx,
- referrer,
- Thread::Current(),
- StaticPrimitiveWrite,
- sizeof(uint64_t),
- true);
- if (LIKELY(field != NULL)) {
- field->Set64(field->GetDeclaringClass(), new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_set_obj_static_from_code(uint32_t field_idx,
- AbstractMethod* referrer,
- Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticObjectWrite, sizeof(Object*));
- if (LIKELY(field != NULL)) {
- field->SetObj(field->GetDeclaringClass(), new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticObjectWrite, sizeof(Object*), true);
- if (LIKELY(field != NULL)) {
- field->SetObj(field->GetDeclaringClass(), new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_get32_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- return field->Get32(field->GetDeclaringClass());
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticPrimitiveRead, sizeof(uint32_t), true);
- if (LIKELY(field != NULL)) {
- return field->Get32(field->GetDeclaringClass());
- }
- return 0;
-}
-
-int64_t art_portable_get64_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- return field->Get64(field->GetDeclaringClass());
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticPrimitiveRead, sizeof(uint64_t), true);
- if (LIKELY(field != NULL)) {
- return field->Get64(field->GetDeclaringClass());
- }
- return 0;
-}
-
-Object* art_portable_get_obj_static_from_code(uint32_t field_idx, AbstractMethod* referrer)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, StaticObjectRead, sizeof(Object*));
- if (LIKELY(field != NULL)) {
- return field->GetObj(field->GetDeclaringClass());
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- StaticObjectRead, sizeof(Object*), true);
- if (LIKELY(field != NULL)) {
- return field->GetObj(field->GetDeclaringClass());
- }
- return 0;
-}
-
-int32_t art_portable_set32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
- Object* obj, uint32_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- field->Set32(obj, new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstancePrimitiveWrite, sizeof(uint32_t), true);
- if (LIKELY(field != NULL)) {
- field->Set32(obj, new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_set64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
- Object* obj, int64_t new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- field->Set64(obj, new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstancePrimitiveWrite, sizeof(uint64_t), true);
- if (LIKELY(field != NULL)) {
- field->Set64(obj, new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_set_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer,
- Object* obj, Object* new_value)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite, sizeof(Object*));
- if (LIKELY(field != NULL)) {
- field->SetObj(obj, new_value);
- return 0;
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstanceObjectWrite, sizeof(Object*), true);
- if (LIKELY(field != NULL)) {
- field->SetObj(obj, new_value);
- return 0;
- }
- return -1;
-}
-
-int32_t art_portable_get32_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint32_t));
- if (LIKELY(field != NULL)) {
- return field->Get32(obj);
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstancePrimitiveRead, sizeof(uint32_t), true);
- if (LIKELY(field != NULL)) {
- return field->Get32(obj);
- }
- return 0;
-}
-
-int64_t art_portable_get64_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(uint64_t));
- if (LIKELY(field != NULL)) {
- return field->Get64(obj);
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstancePrimitiveRead, sizeof(uint64_t), true);
- if (LIKELY(field != NULL)) {
- return field->Get64(obj);
- }
- return 0;
-}
-
-Object* art_portable_get_obj_instance_from_code(uint32_t field_idx, AbstractMethod* referrer, Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Field* field = FindFieldFast(field_idx, referrer, InstanceObjectRead, sizeof(Object*));
- if (LIKELY(field != NULL)) {
- return field->GetObj(obj);
- }
- field = FindFieldFromCode(field_idx, referrer, Thread::Current(),
- InstanceObjectRead, sizeof(Object*), true);
- if (LIKELY(field != NULL)) {
- return field->GetObj(obj);
- }
- return 0;
-}
-
-void art_portable_fill_array_data_from_code(AbstractMethod* method, uint32_t dex_pc,
- Array* array, uint32_t payload_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- // Test: Is array equal to null? (Guard NullPointerException)
- if (UNLIKELY(array == NULL)) {
- art_portable_throw_null_pointer_exception_from_code(dex_pc);
- return;
- }
-
- // Find the payload from the CodeItem
- MethodHelper mh(method);
- const DexFile::CodeItem* code_item = mh.GetCodeItem();
-
- DCHECK_GT(code_item->insns_size_in_code_units_, payload_offset);
-
- const Instruction::ArrayDataPayload* payload =
- reinterpret_cast<const Instruction::ArrayDataPayload*>(
- code_item->insns_ + payload_offset);
-
- DCHECK_EQ(payload->ident,
- static_cast<uint16_t>(Instruction::kArrayDataSignature));
-
- // Test: Is array big enough?
- uint32_t array_len = static_cast<uint32_t>(array->GetLength());
- if (UNLIKELY(array_len < payload->element_count)) {
- int32_t last_index = payload->element_count - 1;
- art_portable_throw_array_bounds_from_code(array_len, last_index);
- return;
- }
-
- // Copy the data
- size_t size = payload->element_width * payload->element_count;
- memcpy(array->GetRawData(payload->element_width), payload->data, size);
-}
-
-
-
-//----------------------------------------------------------------------------
-// Type checking, in the nature of casting
-//----------------------------------------------------------------------------
-
-int32_t art_portable_is_assignable_from_code(const Class* dest_type, const Class* src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(dest_type != NULL);
- DCHECK(src_type != NULL);
- return dest_type->IsAssignableFrom(src_type) ? 1 : 0;
-}
-
-void art_portable_check_cast_from_code(const Class* dest_type, const Class* src_type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(dest_type->IsClass()) << PrettyClass(dest_type);
- DCHECK(src_type->IsClass()) << PrettyClass(src_type);
- if (UNLIKELY(!dest_type->IsAssignableFrom(src_type))) {
- ThrowClassCastException(dest_type, src_type);
- }
-}
-
-void art_portable_check_put_array_element_from_code(const Object* element,
- const Object* array)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (element == NULL) {
- return;
- }
- DCHECK(array != NULL);
- Class* array_class = array->GetClass();
- DCHECK(array_class != NULL);
- Class* component_type = array_class->GetComponentType();
- Class* element_class = element->GetClass();
- if (UNLIKELY(!component_type->IsAssignableFrom(element_class))) {
- ThrowArrayStoreException(element_class, array_class);
- }
- return;
-}
-
-//----------------------------------------------------------------------------
-// JNI
-//----------------------------------------------------------------------------
-
-// Called on entry to JNI, transition out of Runnable and release share of mutator_lock_.
-uint32_t art_portable_jni_method_start(Thread* self)
- UNLOCK_FUNCTION(GlobalSynchronizatio::mutator_lock_) {
- JNIEnvExt* env = self->GetJniEnv();
- uint32_t saved_local_ref_cookie = env->local_ref_cookie;
- env->local_ref_cookie = env->locals.GetSegmentState();
- self->TransitionFromRunnableToSuspended(kNative);
- return saved_local_ref_cookie;
-}
-
-uint32_t art_portable_jni_method_start_synchronized(jobject to_lock, Thread* self)
- UNLOCK_FUNCTION(Locks::mutator_lock_) {
- self->DecodeJObject(to_lock)->MonitorEnter(self);
- return art_portable_jni_method_start(self);
-}
-
-static inline void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self) {
- JNIEnvExt* env = self->GetJniEnv();
- env->locals.SetSegmentState(env->local_ref_cookie);
- env->local_ref_cookie = saved_local_ref_cookie;
-}
-
-void art_portable_jni_method_end(uint32_t saved_local_ref_cookie, Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- PopLocalReferences(saved_local_ref_cookie, self);
-}
-
-
-void art_portable_jni_method_end_synchronized(uint32_t saved_local_ref_cookie,
- jobject locked,
- Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
- PopLocalReferences(saved_local_ref_cookie, self);
-}
-
-Object* art_portable_jni_method_end_with_reference(jobject result,
- uint32_t saved_local_ref_cookie,
- Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- Object* o = self->DecodeJObject(result); // Must decode before pop.
- PopLocalReferences(saved_local_ref_cookie, self);
- // Process result.
- if (UNLIKELY(self->GetJniEnv()->check_jni)) {
- if (self->IsExceptionPending()) {
- return NULL;
- }
- CheckReferenceResult(o, self);
- }
- return o;
-}
-
-Object* art_portable_jni_method_end_with_reference_synchronized(jobject result,
- uint32_t saved_local_ref_cookie,
- jobject locked,
- Thread* self)
- SHARED_LOCK_FUNCTION(Locks::mutator_lock_) {
- self->TransitionFromSuspendedToRunnable();
- UnlockJniSynchronizedMethod(locked, self); // Must decode before pop.
- Object* o = self->DecodeJObject(result);
- PopLocalReferences(saved_local_ref_cookie, self);
- // Process result.
- if (UNLIKELY(self->GetJniEnv()->check_jni)) {
- if (self->IsExceptionPending()) {
- return NULL;
- }
- CheckReferenceResult(o, self);
- }
- return o;
-}
-
-// Handler for invocation on proxy methods. Create a boxed argument array and invoke the invocation
-// handler which is a field within the proxy object receiver. The var args encode the arguments
-// with the last argument being a pointer to a JValue to store the result in.
-void art_portable_proxy_invoke_handler_from_code(AbstractMethod* proxy_method, ...)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- va_list ap;
- va_start(ap, proxy_method);
-
- Object* receiver = va_arg(ap, Object*);
- Thread* self = va_arg(ap, Thread*);
- MethodHelper proxy_mh(proxy_method);
-
- // Ensure we don't get thread suspension until the object arguments are safely in jobjects.
- const char* old_cause =
- self->StartAssertNoThreadSuspension("Adding to IRT proxy object arguments");
- self->VerifyStack();
-
- // Start new JNI local reference state.
- JNIEnvExt* env = self->GetJniEnv();
- ScopedObjectAccessUnchecked soa(env);
- ScopedJniEnvLocalRefState env_state(env);
-
- // Create local ref. copies of the receiver.
- jobject rcvr_jobj = soa.AddLocalReference<jobject>(receiver);
-
- // Convert proxy method into expected interface method.
- AbstractMethod* interface_method = proxy_method->FindOverriddenMethod();
- DCHECK(interface_method != NULL);
- DCHECK(!interface_method->IsProxyMethod()) << PrettyMethod(interface_method);
- jobject interface_method_jobj = soa.AddLocalReference<jobject>(interface_method);
-
- // Record arguments and turn Object* arguments into jobject to survive GC.
- std::vector<jvalue> args;
- const size_t num_params = proxy_mh.NumArgs();
- for (size_t i = 1; i < num_params; ++i) {
- jvalue val;
- switch (proxy_mh.GetParamPrimitiveType(i)) {
- case Primitive::kPrimNot:
- val.l = soa.AddLocalReference<jobject>(va_arg(ap, Object*));
- break;
- case Primitive::kPrimBoolean: // Fall-through.
- case Primitive::kPrimByte: // Fall-through.
- case Primitive::kPrimChar: // Fall-through.
- case Primitive::kPrimShort: // Fall-through.
- case Primitive::kPrimInt: // Fall-through.
- val.i = va_arg(ap, jint);
- break;
- case Primitive::kPrimFloat:
- // TODO: should this be jdouble? Floats aren't passed to var arg routines.
- val.i = va_arg(ap, jint);
- break;
- case Primitive::kPrimDouble:
- val.d = (va_arg(ap, jdouble));
- break;
- case Primitive::kPrimLong:
- val.j = (va_arg(ap, jlong));
- break;
- case Primitive::kPrimVoid:
- LOG(FATAL) << "UNREACHABLE";
- val.j = 0;
- break;
- }
- args.push_back(val);
- }
- self->EndAssertNoThreadSuspension(old_cause);
- JValue* result_location = NULL;
- const char* shorty = proxy_mh.GetShorty();
- if (shorty[0] != 'V') {
- result_location = va_arg(ap, JValue*);
- }
- va_end(ap);
- JValue result = InvokeProxyInvocationHandler(soa, shorty, rcvr_jobj, interface_method_jobj, args);
- if (result_location != NULL) {
- *result_location = result;
- }
-}
-
-//----------------------------------------------------------------------------
-// Memory barrier
-//----------------------------------------------------------------------------
-
-void art_portable_constructor_barrier() {
- LOG(FATAL) << "Implemented by IRBuilder.";
-}
-} // extern "C"
diff --git a/runtime/runtime_support_llvm.h b/runtime/runtime_support_llvm.h
deleted file mode 100644
index 43ea953..0000000
--- a/runtime/runtime_support_llvm.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
-#define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
-
-extern "C" {
-//----------------------------------------------------------------------------
-// Runtime Support Function Lookup Callback
-//----------------------------------------------------------------------------
-void* art_portable_find_runtime_support_func(void* context, const char* name);
-} // extern "C"
-
-#endif // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 286a2a6..aeb15f0 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -16,7 +16,6 @@
#include "stack.h"
-#include "oat/runtime/context.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
diff --git a/runtime/stack.h b/runtime/stack.h
index 0b94f27..de93846 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -20,7 +20,7 @@
#include "dex_file.h"
#include "instrumentation.h"
#include "base/macros.h"
-#include "oat/runtime/context.h"
+#include "arch/context.h"
#include <stdint.h>
#include <string>
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 0b3a5b4..97a1410 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -30,6 +30,7 @@
#include <iostream>
#include <list>
+#include "arch/context.h"
#include "base/mutex.h"
#include "class_linker.h"
#include "class_linker-inl.h"
@@ -37,6 +38,7 @@
#include "cutils/atomic-inline.h"
#include "debugger.h"
#include "dex_file-inl.h"
+#include "entrypoints/entrypoint_utils.h"
#include "gc_map.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
@@ -50,11 +52,9 @@
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
-#include "oat/runtime/context.h"
#include "object_utils.h"
#include "reflection.h"
#include "runtime.h"
-#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
@@ -86,16 +86,23 @@
}
#endif
+void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints);
+
void Thread::InitFunctionPointers() {
#if !defined(__APPLE__) // The Mac GCC is too old to accept this code.
// Insert a placeholder so we can easily tell if we call an unimplemented entry point.
- uintptr_t* begin = reinterpret_cast<uintptr_t*>(&entrypoints_);
- uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(entrypoints_));
+ uintptr_t* begin = reinterpret_cast<uintptr_t*>(&quick_entrypoints_);
+ uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(quick_entrypoints_));
+ for (uintptr_t* it = begin; it != end; ++it) {
+ *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
+ }
+ begin = reinterpret_cast<uintptr_t*>(&portable_entrypoints_);
+ end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(portable_entrypoints_));
for (uintptr_t* it = begin; it != end; ++it) {
*it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
}
#endif
- InitEntryPoints(&entrypoints_);
+ InitEntryPoints(&quick_entrypoints_, &portable_entrypoints_);
}
void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
@@ -1582,86 +1589,87 @@
uint32_t offset;
const char* name;
};
-#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x }
+#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x), #x }
+#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x), #x }
static const EntryPointInfo gThreadEntryPointInfo[] = {
- ENTRY_POINT_INFO(pAllocArrayFromCode),
- ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
- ENTRY_POINT_INFO(pAllocObjectFromCode),
- ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
- ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
- ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
- ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
- ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
- ENTRY_POINT_INFO(pCheckCastFromCode),
- ENTRY_POINT_INFO(pInitializeStaticStorage),
- ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
- ENTRY_POINT_INFO(pInitializeTypeFromCode),
- ENTRY_POINT_INFO(pResolveStringFromCode),
- ENTRY_POINT_INFO(pSet32Instance),
- ENTRY_POINT_INFO(pSet32Static),
- ENTRY_POINT_INFO(pSet64Instance),
- ENTRY_POINT_INFO(pSet64Static),
- ENTRY_POINT_INFO(pSetObjInstance),
- ENTRY_POINT_INFO(pSetObjStatic),
- ENTRY_POINT_INFO(pGet32Instance),
- ENTRY_POINT_INFO(pGet32Static),
- ENTRY_POINT_INFO(pGet64Instance),
- ENTRY_POINT_INFO(pGet64Static),
- ENTRY_POINT_INFO(pGetObjInstance),
- ENTRY_POINT_INFO(pGetObjStatic),
- ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
- ENTRY_POINT_INFO(pJniMethodStart),
- ENTRY_POINT_INFO(pJniMethodStartSynchronized),
- ENTRY_POINT_INFO(pJniMethodEnd),
- ENTRY_POINT_INFO(pJniMethodEndSynchronized),
- ENTRY_POINT_INFO(pJniMethodEndWithReference),
- ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
- ENTRY_POINT_INFO(pLockObjectFromCode),
- ENTRY_POINT_INFO(pUnlockObjectFromCode),
- ENTRY_POINT_INFO(pCmpgDouble),
- ENTRY_POINT_INFO(pCmpgFloat),
- ENTRY_POINT_INFO(pCmplDouble),
- ENTRY_POINT_INFO(pCmplFloat),
- ENTRY_POINT_INFO(pFmod),
- ENTRY_POINT_INFO(pSqrt),
- ENTRY_POINT_INFO(pL2d),
- ENTRY_POINT_INFO(pFmodf),
- ENTRY_POINT_INFO(pL2f),
- ENTRY_POINT_INFO(pD2iz),
- ENTRY_POINT_INFO(pF2iz),
- ENTRY_POINT_INFO(pIdivmod),
- ENTRY_POINT_INFO(pD2l),
- ENTRY_POINT_INFO(pF2l),
- ENTRY_POINT_INFO(pLdiv),
- ENTRY_POINT_INFO(pLdivmod),
- ENTRY_POINT_INFO(pLmul),
- ENTRY_POINT_INFO(pShlLong),
- ENTRY_POINT_INFO(pShrLong),
- ENTRY_POINT_INFO(pUshrLong),
- ENTRY_POINT_INFO(pInterpreterToInterpreterEntry),
- ENTRY_POINT_INFO(pInterpreterToQuickEntry),
- ENTRY_POINT_INFO(pIndexOf),
- ENTRY_POINT_INFO(pMemcmp16),
- ENTRY_POINT_INFO(pStringCompareTo),
- ENTRY_POINT_INFO(pMemcpy),
- ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
- ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
- ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
- ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
- ENTRY_POINT_INFO(pCheckSuspendFromCode),
- ENTRY_POINT_INFO(pTestSuspendFromCode),
- ENTRY_POINT_INFO(pDeliverException),
- ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
- ENTRY_POINT_INFO(pThrowDivZeroFromCode),
- ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
- ENTRY_POINT_INFO(pThrowNullPointerFromCode),
- ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
+ QUICK_ENTRY_POINT_INFO(pAllocArrayFromCode),
+ QUICK_ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pAllocObjectFromCode),
+ QUICK_ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
+ QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
+ QUICK_ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
+ QUICK_ENTRY_POINT_INFO(pCheckCastFromCode),
+ QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
+ QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
+ QUICK_ENTRY_POINT_INFO(pInitializeTypeFromCode),
+ QUICK_ENTRY_POINT_INFO(pResolveStringFromCode),
+ QUICK_ENTRY_POINT_INFO(pSet32Instance),
+ QUICK_ENTRY_POINT_INFO(pSet32Static),
+ QUICK_ENTRY_POINT_INFO(pSet64Instance),
+ QUICK_ENTRY_POINT_INFO(pSet64Static),
+ QUICK_ENTRY_POINT_INFO(pSetObjInstance),
+ QUICK_ENTRY_POINT_INFO(pSetObjStatic),
+ QUICK_ENTRY_POINT_INFO(pGet32Instance),
+ QUICK_ENTRY_POINT_INFO(pGet32Static),
+ QUICK_ENTRY_POINT_INFO(pGet64Instance),
+ QUICK_ENTRY_POINT_INFO(pGet64Static),
+ QUICK_ENTRY_POINT_INFO(pGetObjInstance),
+ QUICK_ENTRY_POINT_INFO(pGetObjStatic),
+ QUICK_ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
+ QUICK_ENTRY_POINT_INFO(pJniMethodStart),
+ QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
+ QUICK_ENTRY_POINT_INFO(pJniMethodEnd),
+ QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized),
+ QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference),
+ QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
+ QUICK_ENTRY_POINT_INFO(pLockObjectFromCode),
+ QUICK_ENTRY_POINT_INFO(pUnlockObjectFromCode),
+ QUICK_ENTRY_POINT_INFO(pCmpgDouble),
+ QUICK_ENTRY_POINT_INFO(pCmpgFloat),
+ QUICK_ENTRY_POINT_INFO(pCmplDouble),
+ QUICK_ENTRY_POINT_INFO(pCmplFloat),
+ QUICK_ENTRY_POINT_INFO(pFmod),
+ QUICK_ENTRY_POINT_INFO(pSqrt),
+ QUICK_ENTRY_POINT_INFO(pL2d),
+ QUICK_ENTRY_POINT_INFO(pFmodf),
+ QUICK_ENTRY_POINT_INFO(pL2f),
+ QUICK_ENTRY_POINT_INFO(pD2iz),
+ QUICK_ENTRY_POINT_INFO(pF2iz),
+ QUICK_ENTRY_POINT_INFO(pIdivmod),
+ QUICK_ENTRY_POINT_INFO(pD2l),
+ QUICK_ENTRY_POINT_INFO(pF2l),
+ QUICK_ENTRY_POINT_INFO(pLdiv),
+ QUICK_ENTRY_POINT_INFO(pLdivmod),
+ QUICK_ENTRY_POINT_INFO(pLmul),
+ QUICK_ENTRY_POINT_INFO(pShlLong),
+ QUICK_ENTRY_POINT_INFO(pShrLong),
+ QUICK_ENTRY_POINT_INFO(pUshrLong),
+ QUICK_ENTRY_POINT_INFO(pInterpreterToInterpreterEntry),
+ QUICK_ENTRY_POINT_INFO(pInterpreterToQuickEntry),
+ QUICK_ENTRY_POINT_INFO(pIndexOf),
+ QUICK_ENTRY_POINT_INFO(pMemcmp16),
+ QUICK_ENTRY_POINT_INFO(pStringCompareTo),
+ QUICK_ENTRY_POINT_INFO(pMemcpy),
+ QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
+ QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
+ QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
+ QUICK_ENTRY_POINT_INFO(pCheckSuspendFromCode),
+ QUICK_ENTRY_POINT_INFO(pTestSuspendFromCode),
+ QUICK_ENTRY_POINT_INFO(pDeliverException),
+ QUICK_ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
+ QUICK_ENTRY_POINT_INFO(pThrowDivZeroFromCode),
+ QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
+ QUICK_ENTRY_POINT_INFO(pThrowNullPointerFromCode),
+ QUICK_ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
+ PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
};
-#undef ENTRY_POINT_INFO
+#undef QUICK_ENTRY_POINT_INFO
void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets.
@@ -1686,8 +1694,9 @@
#undef DO_THREAD_OFFSET
size_t entry_point_count = arraysize(gThreadEntryPointInfo);
- CHECK_EQ(entry_point_count * size_of_pointers, sizeof(EntryPoints));
- uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_);
+ CHECK_EQ(entry_point_count * size_of_pointers,
+ sizeof(QuickEntryPoints) + sizeof(PortableEntryPoints));
+ uint32_t expected_offset = OFFSETOF_MEMBER(Thread, quick_entrypoints_);
for (size_t i = 0; i < entry_point_count; ++i) {
CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
expected_offset += size_of_pointers;
@@ -1709,7 +1718,7 @@
self_(self), exception_(exception), is_deoptimization_(is_deoptimization),
to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location),
handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0),
- native_method_count_(0),
+ native_method_count_(0), clear_exception_(false),
method_tracing_active_(is_deoptimization ||
Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) {
@@ -1754,7 +1763,7 @@
dex_pc = GetDexPc();
}
if (dex_pc != DexFile::kDexNoIndex) {
- uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc);
+ uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc, &clear_exception_);
if (found_dex_pc != DexFile::kDexNoIndex) {
handler_dex_pc_ = found_dex_pc;
handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc);
@@ -1820,8 +1829,13 @@
LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
}
}
- // Put exception back in root set and clear throw location.
- self_->SetException(ThrowLocation(), exception_);
+ if (clear_exception_) {
+ // Exception was cleared as part of delivery.
+ DCHECK(!self_->IsExceptionPending());
+ } else {
+ // Put exception back in root set with clear throw location.
+ self_->SetException(ThrowLocation(), exception_);
+ }
self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
// Do instrumentation events after allowing thread suspension again.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
@@ -1864,6 +1878,8 @@
uint32_t handler_dex_pc_;
// Number of native methods passed in crawl (equates to number of SIRTs to pop)
uint32_t native_method_count_;
+ // Should the exception be cleared as the catch block has no move-exception?
+ bool clear_exception_;
// Is method tracing active?
const bool method_tracing_active_;
// Support for nesting no thread suspension checks.
diff --git a/runtime/thread.h b/runtime/thread.h
index b9393a3..ff0fe22 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -26,9 +26,10 @@
#include <string>
#include "base/macros.h"
+#include "entrypoints/portable/portable_entrypoints.h"
+#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "jvalue.h"
-#include "oat/runtime/oat_support_entrypoints.h"
#include "locks.h"
#include "offsets.h"
#include "root_visitor.h"
@@ -773,9 +774,10 @@
Closure* checkpoint_function_;
public:
- // Runtime support function pointers
+ // Entrypoint function pointers
// TODO: move this near the top, since changing its offset requires all oats to be recompiled!
- EntryPoints entrypoints_;
+ QuickEntryPoints quick_entrypoints_;
+ PortableEntryPoints portable_entrypoints_;
private:
// How many times has our pthread key's destructor been called?
diff --git a/runtime/thread_mips.cc b/runtime/thread_mips.cc
deleted file mode 100644
index 0ef26bf..0000000
--- a/runtime/thread_mips.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "thread.h"
-
-#include "asm_support.h"
-#include "base/macros.h"
-
-namespace art {
-
-void Thread::InitCpu() {
- CHECK_EQ(THREAD_FLAGS_OFFSET, OFFSETOF_MEMBER(Thread, state_and_flags_));
- CHECK_EQ(THREAD_EXCEPTION_OFFSET, OFFSETOF_MEMBER(Thread, exception_));
-}
-
-} // namespace art
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 177fd48..2bce70f 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -29,14 +29,14 @@
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
-#if !defined(ART_USE_PORTABLE_COMPILER)
-#include "oat/runtime/oat_support_entrypoints.h"
-#endif
#include "object_utils.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread.h"
#include "thread_list.h"
+#if !defined(ART_USE_PORTABLE_COMPILER)
+#include "entrypoints/quick/quick_entrypoints.h"
+#endif
namespace art {
diff --git a/runtime/verifier/instruction_flags.cc b/runtime/verifier/instruction_flags.cc
index 358791d..f76c226 100644
--- a/runtime/verifier/instruction_flags.cc
+++ b/runtime/verifier/instruction_flags.cc
@@ -22,16 +22,17 @@
namespace verifier {
std::string InstructionFlags::ToString() const {
- char encoding[6];
+ char encoding[7];
if (!IsOpcode()) {
- strncpy(encoding, "XXXXX", sizeof(encoding));
+ strncpy(encoding, "XXXXXX", sizeof(encoding));
} else {
- strncpy(encoding, "-----", sizeof(encoding));
- if (IsInTry()) encoding[kInTry] = 'T';
- if (IsBranchTarget()) encoding[kBranchTarget] = 'B';
+ strncpy(encoding, "------", sizeof(encoding));
+ if (IsVisited()) encoding[kVisited] = 'V';
+ if (IsChanged()) encoding[kChanged] = 'C';
+ if (IsInTry()) encoding[kInTry] = 'T';
+ if (IsBranchTarget()) encoding[kBranchTarget] = 'B';
if (IsCompileTimeInfoPoint()) encoding[kCompileTimeInfoPoint] = 'G';
- if (IsVisited()) encoding[kVisited] = 'V';
- if (IsChanged()) encoding[kChanged] = 'C';
+ if (IsReturn()) encoding[kReturn] = 'R';
}
return encoding;
}
diff --git a/runtime/verifier/instruction_flags.h b/runtime/verifier/instruction_flags.h
index 9b2e595..e50ba13 100644
--- a/runtime/verifier/instruction_flags.h
+++ b/runtime/verifier/instruction_flags.h
@@ -93,6 +93,21 @@
return IsVisited() || IsChanged();
}
+ void SetReturn() {
+ flags_ |= 1 << kReturn;
+ }
+ void ClearReturn() {
+ flags_ &= ~(1 << kReturn);
+ }
+ bool IsReturn() const {
+ return (flags_ & (1 << kReturn)) != 0;
+ }
+
+ void SetCompileTimeInfoPointAndReturn() {
+ SetCompileTimeInfoPoint();
+ SetReturn();
+ }
+
std::string ToString() const;
private:
@@ -108,6 +123,8 @@
kBranchTarget = 3,
// Location of interest to the compiler for GC maps and verifier based method sharpening.
kCompileTimeInfoPoint = 4,
+ // A return instruction.
+ kReturn = 5,
};
// Size of instruction in code units.
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index acb6557..9f0d911 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -102,7 +102,11 @@
error += dex_file.GetLocation();
return kHardFailure;
}
- return VerifyClass(&dex_file, kh.GetDexCache(), klass->GetClassLoader(), class_def_idx, error, allow_soft_failures);
+ return VerifyClass(&dex_file,
+ kh.GetDexCache(),
+ klass->GetClassLoader(),
+ class_def_idx, error,
+ allow_soft_failures);
}
MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
@@ -142,8 +146,15 @@
// We couldn't resolve the method, but continue regardless.
Thread::Current()->ClearException();
}
- MethodVerifier::FailureKind result = VerifyMethod(method_idx, dex_file, dex_cache, class_loader,
- class_def_idx, it.GetMethodCodeItem(), method, it.GetMemberAccessFlags(), allow_soft_failures);
+ MethodVerifier::FailureKind result = VerifyMethod(method_idx,
+ dex_file,
+ dex_cache,
+ class_loader,
+ class_def_idx,
+ it.GetMethodCodeItem(),
+ method,
+ it.GetMemberAccessFlags(),
+ allow_soft_failures);
if (result != kNoFailure) {
if (result == kHardFailure) {
hard_fail = true;
@@ -177,8 +188,15 @@
// We couldn't resolve the method, but continue regardless.
Thread::Current()->ClearException();
}
- MethodVerifier::FailureKind result = VerifyMethod(method_idx, dex_file, dex_cache, class_loader,
- class_def_idx, it.GetMethodCodeItem(), method, it.GetMemberAccessFlags(), allow_soft_failures);
+ MethodVerifier::FailureKind result = VerifyMethod(method_idx,
+ dex_file,
+ dex_cache,
+ class_loader,
+ class_def_idx,
+ it.GetMethodCodeItem(),
+ method,
+ it.GetMemberAccessFlags(),
+ allow_soft_failures);
if (result != kNoFailure) {
if (result == kHardFailure) {
hard_fail = true;
@@ -282,7 +300,9 @@
new_instance_count_(0),
monitor_enter_count_(0),
can_load_classes_(can_load_classes),
- allow_soft_failures_(allow_soft_failures) {
+ allow_soft_failures_(allow_soft_failures),
+ has_check_casts_(false),
+ has_virtual_or_interface_invokes_(false) {
}
void MethodVerifier::FindLocksAtDexPc(mirror::AbstractMethod* m, uint32_t dex_pc,
@@ -470,6 +490,13 @@
new_instance_count++;
} else if (opcode == Instruction::MONITOR_ENTER) {
monitor_enter_count++;
+ } else if (opcode == Instruction::CHECK_CAST) {
+ has_check_casts_ = true;
+ } else if ((inst->Opcode() == Instruction::INVOKE_VIRTUAL) ||
+ (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE) ||
+ (inst->Opcode() == Instruction::INVOKE_INTERFACE) ||
+ (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE)) {
+ has_virtual_or_interface_invokes_ = true;
}
size_t inst_size = inst->SizeInCodeUnits();
insn_flags_[dex_pc].SetLengthInCodeUnits(inst_size);
@@ -506,7 +533,8 @@
return false;
}
if (!insn_flags_[start].IsOpcode()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'try' block starts inside an instruction (" << start << ")";
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "'try' block starts inside an instruction (" << start << ")";
return false;
}
for (uint32_t dex_pc = start; dex_pc < end;
@@ -523,7 +551,8 @@
for (; iterator.HasNext(); iterator.Next()) {
uint32_t dex_pc= iterator.GetHandlerAddress();
if (!insn_flags_[dex_pc].IsOpcode()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "exception handler starts at bad address (" << dex_pc << ")";
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "exception handler starts at bad address (" << dex_pc << ")";
return false;
}
insn_flags_[dex_pc].SetBranchTarget();
@@ -560,8 +589,10 @@
/* Flag instructions that are garbage collection points */
// All invoke points are marked as "Throw" points already.
// We are relying on this to also count all the invokes as interesting.
- if (inst->IsBranch() || inst->IsSwitch() || inst->IsThrow() || inst->IsReturn()) {
+ if (inst->IsBranch() || inst->IsSwitch() || inst->IsThrow()) {
insn_flags_[dex_pc].SetCompileTimeInfoPoint();
+ } else if (inst->IsReturn()) {
+ insn_flags_[dex_pc].SetCompileTimeInfoPointAndReturn();
}
dex_pc += inst->SizeInCodeUnits();
inst = inst->Next();
@@ -727,11 +758,13 @@
}
if (bracket_count == 0) {
/* The given class must be an array type. */
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't new-array class '" << descriptor << "' (not an array)";
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "can't new-array class '" << descriptor << "' (not an array)";
return false;
} else if (bracket_count > 255) {
/* It is illegal to create an array of more than 255 dimensions. */
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "can't new-array class '" << descriptor << "' (exceeds limit)";
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "can't new-array class '" << descriptor << "' (exceeds limit)";
return false;
}
return true;
@@ -749,7 +782,8 @@
if ((int32_t) cur_offset + array_data_offset < 0 ||
cur_offset + array_data_offset + 2 >= insn_count) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid array data start: at " << cur_offset
- << ", data offset " << array_data_offset << ", count " << insn_count;
+ << ", data offset " << array_data_offset
+ << ", count " << insn_count;
return false;
}
/* offset to array data table is a relative branch-style offset */
@@ -781,18 +815,22 @@
return false;
}
if (!selfOkay && offset == 0) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch offset of zero not allowed at" << reinterpret_cast<void*>(cur_offset);
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch offset of zero not allowed at"
+ << reinterpret_cast<void*>(cur_offset);
return false;
}
// Check for 32-bit overflow. This isn't strictly necessary if we can depend on the runtime
// to have identical "wrap-around" behavior, but it's unwise to depend on that.
if (((int64_t) cur_offset + (int64_t) offset) != (int64_t) (cur_offset + offset)) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch target overflow " << reinterpret_cast<void*>(cur_offset) << " +" << offset;
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "branch target overflow "
+ << reinterpret_cast<void*>(cur_offset) << " +" << offset;
return false;
}
const uint32_t insn_count = code_item_->insns_size_in_code_units_;
int32_t abs_offset = cur_offset + offset;
- if (abs_offset < 0 || (uint32_t) abs_offset >= insn_count || !insn_flags_[abs_offset].IsOpcode()) {
+ if (abs_offset < 0 ||
+ (uint32_t) abs_offset >= insn_count ||
+ !insn_flags_[abs_offset].IsOpcode()) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid branch target " << offset << " (-> "
<< reinterpret_cast<void*>(abs_offset) << ") at "
<< reinterpret_cast<void*>(cur_offset);
@@ -848,7 +886,8 @@
int32_t switch_offset = insns[1] | ((int32_t) insns[2]) << 16;
if ((int32_t) cur_offset + switch_offset < 0 || cur_offset + switch_offset + 2 >= insn_count) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch start: at " << cur_offset
- << ", switch offset " << switch_offset << ", count " << insn_count;
+ << ", switch offset " << switch_offset
+ << ", count " << insn_count;
return false;
}
/* offset to switch table is a relative branch-style offset */
@@ -875,15 +914,16 @@
}
uint32_t table_size = targets_offset + switch_count * 2;
if (switch_insns[0] != expected_signature) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << StringPrintf("wrong signature for switch table (%x, wanted %x)",
- switch_insns[0], expected_signature);
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << StringPrintf("wrong signature for switch table (%x, wanted %x)",
+ switch_insns[0], expected_signature);
return false;
}
/* make sure the end of the switch is in range */
if (cur_offset + switch_offset + table_size > (uint32_t) insn_count) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch end: at " << cur_offset << ", switch offset "
- << switch_offset << ", end "
- << (cur_offset + switch_offset + table_size)
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch end: at " << cur_offset
+ << ", switch offset " << switch_offset
+ << ", end " << (cur_offset + switch_offset + table_size)
<< ", count " << insn_count;
return false;
}
@@ -906,10 +946,13 @@
int32_t offset = (int32_t) switch_insns[targets_offset + targ * 2] |
(int32_t) (switch_insns[targets_offset + targ * 2 + 1] << 16);
int32_t abs_offset = cur_offset + offset;
- if (abs_offset < 0 || abs_offset >= (int32_t) insn_count || !insn_flags_[abs_offset].IsOpcode()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset << " (-> "
- << reinterpret_cast<void*>(abs_offset) << ") at "
- << reinterpret_cast<void*>(cur_offset) << "[" << targ << "]";
+ if (abs_offset < 0 ||
+ abs_offset >= (int32_t) insn_count ||
+ !insn_flags_[abs_offset].IsOpcode()) {
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid switch target " << offset
+ << " (-> " << reinterpret_cast<void*>(abs_offset) << ") at "
+ << reinterpret_cast<void*>(cur_offset)
+ << "[" << targ << "]";
return false;
}
insn_flags_[abs_offset].SetBranchTarget();
@@ -939,14 +982,15 @@
// vA/vC are unsigned 8-bit/16-bit quantities for /range instructions, so there's no risk of
// integer overflow when adding them here.
if (vA + vC > registers_size) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index " << vA << "+" << vC << " in range invoke (> "
- << registers_size << ")";
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid reg index " << vA << "+" << vC
+ << " in range invoke (> " << registers_size << ")";
return false;
}
return true;
}
-static const std::vector<uint8_t>* CreateLengthPrefixedDexGcMap(const std::vector<uint8_t>& gc_map) {
+static const std::vector<uint8_t>* CreateLengthPrefixedDexGcMap(
+ const std::vector<uint8_t>& gc_map) {
std::vector<uint8_t>* length_prefixed_gc_map = new std::vector<uint8_t>;
length_prefixed_gc_map->reserve(gc_map.size() + 4);
length_prefixed_gc_map->push_back((gc_map.size() & 0xff000000) >> 24);
@@ -974,7 +1018,11 @@
<< " insns_size=" << insns_size << ")";
}
/* Create and initialize table holding register status */
- reg_table_.Init(kTrackCompilerInterestPoints, insn_flags_.get(), insns_size, registers_size, this);
+ reg_table_.Init(kTrackCompilerInterestPoints,
+ insn_flags_.get(),
+ insns_size,
+ registers_size,
+ this);
work_line_.reset(new RegisterLine(registers_size, this));
@@ -994,27 +1042,37 @@
return false;
}
- /* Generate a register map and add it to the method. */
- UniquePtr<const std::vector<uint8_t> > map(GenerateGcMap());
- if (map.get() == NULL) {
- DCHECK_NE(failures_.size(), 0U);
- return false; // Not a real failure, but a failure to encode
- }
- if (kIsDebugBuild) {
- VerifyGcMap(*map);
- }
- MethodReference ref(dex_file_, dex_method_idx_);
- const std::vector<uint8_t>* dex_gc_map = CreateLengthPrefixedDexGcMap(*(map.get()));
- verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map);
+ // Compute information for compiler.
+ if (Runtime::Current()->IsCompiler()) {
+ MethodReference ref(dex_file_, dex_method_idx_);
+ bool compile = IsCandidateForCompilation(code_item_, method_access_flags_);
+ if (compile) {
+ /* Generate a register map and add it to the method. */
+ UniquePtr<const std::vector<uint8_t> > map(GenerateGcMap());
+ if (map.get() == NULL) {
+ DCHECK_NE(failures_.size(), 0U);
+ return false; // Not a real failure, but a failure to encode
+ }
+ if (kIsDebugBuild) {
+ VerifyGcMap(*map);
+ }
+ const std::vector<uint8_t>* dex_gc_map = CreateLengthPrefixedDexGcMap(*(map.get()));
+ verifier::MethodVerifier::SetDexGcMap(ref, *dex_gc_map);
+ }
- MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet();
- if (method_to_safe_casts != NULL) {
- SetSafeCastMap(ref, method_to_safe_casts);
- }
+ if (has_check_casts_) {
+ MethodVerifier::MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet();
+ if (method_to_safe_casts != NULL) {
+ SetSafeCastMap(ref, method_to_safe_casts);
+ }
+ }
- MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap();
- if (pc_to_concrete_method != NULL) {
- SetDevirtMap(ref, pc_to_concrete_method);
+ if (has_virtual_or_interface_invokes_) {
+ MethodVerifier::PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap();
+ if (pc_to_concrete_method != NULL) {
+ SetDevirtMap(ref, pc_to_concrete_method);
+ }
+ }
}
return true;
}
@@ -1154,13 +1212,15 @@
break;
}
default:
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected signature type char '" << descriptor << "'";
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected signature type char '"
+ << descriptor << "'";
return false;
}
cur_arg++;
}
if (cur_arg != expected_args) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args << " arguments, found " << cur_arg;
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected " << expected_args
+ << " arguments, found " << cur_arg;
return false;
}
const char* descriptor = dex_file_->GetReturnTypeDescriptor(proto_id);
@@ -1294,12 +1354,14 @@
if (dead_start < 0)
dead_start = insn_idx;
} else if (dead_start >= 0) {
- LogVerifyInfo() << "dead code " << reinterpret_cast<void*>(dead_start) << "-" << reinterpret_cast<void*>(insn_idx - 1);
+ LogVerifyInfo() << "dead code " << reinterpret_cast<void*>(dead_start)
+ << "-" << reinterpret_cast<void*>(insn_idx - 1);
dead_start = -1;
}
}
if (dead_start >= 0) {
- LogVerifyInfo() << "dead code " << reinterpret_cast<void*>(dead_start) << "-" << reinterpret_cast<void*>(insn_idx - 1);
+ LogVerifyInfo() << "dead code " << reinterpret_cast<void*>(dead_start)
+ << "-" << reinterpret_cast<void*>(insn_idx - 1);
}
// To dump the state of the verify after a method, do something like:
// if (PrettyMethod(dex_method_idx_, *dex_file_) ==
@@ -1456,7 +1518,8 @@
/* check the method signature */
const RegType& return_type = GetMethodReturnType();
if (!return_type.IsCategory1Types()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type " << return_type;
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected non-category 1 return type "
+ << return_type;
} else {
// Compilers may generate synthetic functions that write byte values into boolean fields.
// Also, it may use integer values for boolean, byte, short, and character return types.
@@ -1505,10 +1568,14 @@
// Disallow returning uninitialized values and verify that the reference in vAA is an
// instance of the "return_type"
if (reg_type.IsUninitializedTypes()) {
- Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "returning uninitialized object '" << reg_type << "'";
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "returning uninitialized object '"
+ << reg_type << "'";
} else if (!return_type.IsAssignableFrom(reg_type)) {
- Fail(reg_type.IsUnresolvedTypes() ? VERIFY_ERROR_BAD_CLASS_SOFT : VERIFY_ERROR_BAD_CLASS_HARD)
- << "returning '" << reg_type << "', but expected from declaration '" << return_type << "'";
+ Fail(reg_type.IsUnresolvedTypes() ?
+ VERIFY_ERROR_BAD_CLASS_SOFT :
+ VERIFY_ERROR_BAD_CLASS_HARD)
+ << "returning '" << reg_type << "', but expected from declaration '"
+ << return_type << "'";
}
}
}
@@ -1728,7 +1795,8 @@
case Instruction::THROW: {
const RegType& res_type = work_line_->GetRegisterType(inst->VRegA_11x());
if (!reg_types_.JavaLangThrowable(false).IsAssignableFrom(res_type)) {
- Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "thrown class " << res_type << " not instanceof Throwable";
+ Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "thrown class " << res_type
+ << " not instanceof Throwable";
}
break;
}
@@ -1750,7 +1818,8 @@
/* array_type can be null if the reg type is Zero */
if (!array_type.IsZero()) {
if (!array_type.IsArrayTypes()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type " << array_type;
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
+ << array_type;
} else {
const RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_);
DCHECK(!component_type.IsConflict());
@@ -1790,8 +1859,8 @@
mismatch = !reg_type1.IsIntegralTypes() || !reg_type2.IsIntegralTypes();
}
if (mismatch) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to if-eq/if-ne (" << reg_type1 << "," << reg_type2
- << ") must both be references or integral";
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "args to if-eq/if-ne (" << reg_type1 << ","
+ << reg_type2 << ") must both be references or integral";
}
break;
}
@@ -1811,7 +1880,8 @@
case Instruction::IF_NEZ: {
const RegType& reg_type = work_line_->GetRegisterType(inst->VRegA_21t());
if (!reg_type.IsReferenceTypes() && !reg_type.IsIntegralTypes()) {
- Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type << " unexpected as arg to if-eqz/if-nez";
+ Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "type " << reg_type
+ << " unexpected as arg to if-eqz/if-nez";
}
// Find previous instruction - its existence is a precondition to peephole optimization.
@@ -2133,7 +2203,10 @@
case Instruction::INVOKE_STATIC:
case Instruction::INVOKE_STATIC_RANGE: {
bool is_range = (inst->Opcode() == Instruction::INVOKE_STATIC_RANGE);
- mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst, METHOD_STATIC, is_range, false);
+ mirror::AbstractMethod* called_method = VerifyInvocationArgs(inst,
+ METHOD_STATIC,
+ is_range,
+ false);
const char* descriptor;
if (called_method == NULL) {
uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
@@ -2155,7 +2228,10 @@
case Instruction::INVOKE_INTERFACE:
case Instruction::INVOKE_INTERFACE_RANGE: {
bool is_range = (inst->Opcode() == Instruction::INVOKE_INTERFACE_RANGE);
- mirror::AbstractMethod* abs_method = VerifyInvocationArgs(inst, METHOD_INTERFACE, is_range, false);
+ mirror::AbstractMethod* abs_method = VerifyInvocationArgs(inst,
+ METHOD_INTERFACE,
+ is_range,
+ false);
if (abs_method != NULL) {
mirror::Class* called_interface = abs_method->GetDeclaringClass();
if (!called_interface->IsInterface() && !called_interface->IsObjectClass()) {
@@ -2319,7 +2395,11 @@
case Instruction::MUL_FLOAT:
case Instruction::DIV_FLOAT:
case Instruction::REM_FLOAT:
- work_line_->CheckBinaryOp(inst, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false);
+ work_line_->CheckBinaryOp(inst,
+ reg_types_.Float(),
+ reg_types_.Float(),
+ reg_types_.Float(),
+ false);
break;
case Instruction::ADD_DOUBLE:
case Instruction::SUB_DOUBLE:
@@ -2337,15 +2417,27 @@
case Instruction::SHL_INT_2ADDR:
case Instruction::SHR_INT_2ADDR:
case Instruction::USHR_INT_2ADDR:
- work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false);
+ work_line_->CheckBinaryOp2addr(inst,
+ reg_types_.Integer(),
+ reg_types_.Integer(),
+ reg_types_.Integer(),
+ false);
break;
case Instruction::AND_INT_2ADDR:
case Instruction::OR_INT_2ADDR:
case Instruction::XOR_INT_2ADDR:
- work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), true);
+ work_line_->CheckBinaryOp2addr(inst,
+ reg_types_.Integer(),
+ reg_types_.Integer(),
+ reg_types_.Integer(),
+ true);
break;
case Instruction::DIV_INT_2ADDR:
- work_line_->CheckBinaryOp2addr(inst, reg_types_.Integer(), reg_types_.Integer(), reg_types_.Integer(), false);
+ work_line_->CheckBinaryOp2addr(inst,
+ reg_types_.Integer(),
+ reg_types_.Integer(),
+ reg_types_.Integer(),
+ false);
break;
case Instruction::ADD_LONG_2ADDR:
case Instruction::SUB_LONG_2ADDR:
@@ -2370,7 +2462,11 @@
case Instruction::MUL_FLOAT_2ADDR:
case Instruction::DIV_FLOAT_2ADDR:
case Instruction::REM_FLOAT_2ADDR:
- work_line_->CheckBinaryOp2addr(inst, reg_types_.Float(), reg_types_.Float(), reg_types_.Float(), false);
+ work_line_->CheckBinaryOp2addr(inst,
+ reg_types_.Float(),
+ reg_types_.Float(),
+ reg_types_.Float(),
+ false);
break;
case Instruction::ADD_DOUBLE_2ADDR:
case Instruction::SUB_DOUBLE_2ADDR:
@@ -2650,6 +2746,20 @@
// Make workline consistent with fallthrough computed from peephole optimization.
work_line_->CopyFromLine(fallthrough_line.get());
}
+ if (insn_flags_[next_insn_idx].IsReturn()) {
+ // For returns we only care about the operand to the return, all other registers are dead.
+ const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn_idx);
+ Instruction::Code opcode = ret_inst->Opcode();
+ if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) {
+ work_line_->MarkAllRegistersAsConflicts();
+ } else {
+ if (opcode == Instruction::RETURN_WIDE) {
+ work_line_->MarkAllRegistersAsConflictsExceptWide(ret_inst->VRegA_11x());
+ } else {
+ work_line_->MarkAllRegistersAsConflictsExcept(ret_inst->VRegA_11x());
+ }
+ }
+ }
RegisterLine* next_line = reg_table_.GetLine(next_insn_idx);
if (next_line != NULL) {
// Merge registers into what we have for the next instruction,
@@ -3062,8 +3172,9 @@
for (size_t param_index = 0; param_index < params_size; param_index++) {
if (actual_args >= expected_args) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invalid call to '" << PrettyMethod(res_method)
- << "'. Expected " << expected_args << " arguments, processing argument " << actual_args
- << " (where longs/doubles count twice).";
+ << "'. Expected " << expected_args
+ << " arguments, processing argument " << actual_args
+ << " (where longs/doubles count twice).";
return NULL;
}
const char* descriptor =
@@ -3216,7 +3327,8 @@
// The instruction agrees with the type of array, confirm the value to be stored does too
// Note: we use the instruction type (rather than the component type) for aput-object as
// incompatible classes will be caught at runtime as an array store exception
- work_line_->VerifyRegisterType(inst->VRegA_23x(), is_primitive ? component_type : insn_type);
+ work_line_->VerifyRegisterType(inst->VRegA_23x(),
+ is_primitive ? component_type : insn_type);
}
}
}
@@ -3235,8 +3347,10 @@
if (klass_type.IsUnresolvedTypes()) {
return NULL; // Can't resolve Class so no more to do here, will do checking at runtime.
}
- mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx,
- dex_cache_, class_loader_);
+ mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_,
+ field_idx,
+ dex_cache_,
+ class_loader_);
if (field == NULL) {
LOG(INFO) << "Unable to resolve static field " << field_idx << " ("
<< dex_file_->GetFieldName(field_id) << ") in "
@@ -3270,8 +3384,10 @@
if (klass_type.IsUnresolvedTypes()) {
return NULL; // Can't resolve Class so no more to do here
}
- mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_, field_idx,
- dex_cache_, class_loader_);
+ mirror::Field* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS(*dex_file_,
+ field_idx,
+ dex_cache_,
+ class_loader_);
if (field == NULL) {
LOG(INFO) << "Unable to resolve instance field " << field_idx << " ("
<< dex_file_->GetFieldName(field_id) << ") in "
@@ -3302,8 +3418,8 @@
// Field accesses through uninitialized references are only allowable for constructors where
// the field is declared in this class
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "cannot access instance field " << PrettyField(field)
- << " of a not fully initialized object within the context of "
- << PrettyMethod(dex_method_idx_, *dex_file_);
+ << " of a not fully initialized object within the context"
+ << " of " << PrettyMethod(dex_method_idx_, *dex_file_);
return NULL;
} else if (!field_klass.IsAssignableFrom(obj_type)) {
// Trying to access C1.field1 using reference of type C2, which is neither C1 or a sub-class
@@ -3637,9 +3753,28 @@
* there's nothing to "merge". Copy the registers over and mark it as changed. (This is the
* only way a register can transition out of "unknown", so this is not just an optimization.)
*/
- target_line->CopyFromLine(merge_line);
+ if (!insn_flags_[next_insn].IsReturn()) {
+ target_line->CopyFromLine(merge_line);
+ } else {
+ // For returns we only care about the operand to the return, all other registers are dead.
+ // Initialize them as conflicts so they don't add to GC and deoptimization information.
+ const Instruction* ret_inst = Instruction::At(code_item_->insns_ + next_insn);
+ Instruction::Code opcode = ret_inst->Opcode();
+ if ((opcode == Instruction::RETURN_VOID) || (opcode == Instruction::RETURN_VOID_BARRIER)) {
+ target_line->MarkAllRegistersAsConflicts();
+ } else {
+ target_line->CopyFromLine(merge_line);
+ if (opcode == Instruction::RETURN_WIDE) {
+ target_line->MarkAllRegistersAsConflictsExceptWide(ret_inst->VRegA_11x());
+ } else {
+ target_line->MarkAllRegistersAsConflictsExcept(ret_inst->VRegA_11x());
+ }
+ }
+ }
} else {
- UniquePtr<RegisterLine> copy(gDebugVerify ? new RegisterLine(target_line->NumRegs(), this) : NULL);
+ UniquePtr<RegisterLine> copy(gDebugVerify ?
+ new RegisterLine(target_line->NumRegs(), this) :
+ NULL);
if (gDebugVerify) {
copy->CopyFromLine(target_line);
}
@@ -3676,7 +3811,8 @@
const RegType& MethodVerifier::GetDeclaringClass() {
if (declaring_class_ == NULL) {
const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
- const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
+ const char* descriptor
+ = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
if (mirror_method_ != NULL) {
mirror::Class* klass = mirror_method_->GetDeclaringClass();
declaring_class_ = ®_types_.FromClass(descriptor, klass,
@@ -3909,6 +4045,7 @@
}
void MethodVerifier::SetDexGcMap(MethodReference ref, const std::vector<uint8_t>& gc_map) {
+ DCHECK(Runtime::Current()->IsCompiler());
{
WriterMutexLock mu(Thread::Current(), *dex_gc_maps_lock_);
DexGcMapTable::iterator it = dex_gc_maps_->find(ref);
@@ -3923,6 +4060,7 @@
void MethodVerifier::SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* cast_set) {
+ DCHECK(Runtime::Current()->IsCompiler());
MutexLock mu(Thread::Current(), *safecast_map_lock_);
SafeCastMap::iterator it = safecast_map_->find(ref);
if (it != safecast_map_->end()) {
@@ -3931,10 +4069,11 @@
}
safecast_map_->Put(ref, cast_set);
- CHECK(safecast_map_->find(ref) != safecast_map_->end());
+ DCHECK(safecast_map_->find(ref) != safecast_map_->end());
}
bool MethodVerifier::IsSafeCast(MethodReference ref, uint32_t pc) {
+ DCHECK(Runtime::Current()->IsCompiler());
MutexLock mu(Thread::Current(), *safecast_map_lock_);
SafeCastMap::const_iterator it = safecast_map_->find(ref);
if (it == safecast_map_->end()) {
@@ -3947,6 +4086,7 @@
}
const std::vector<uint8_t>* MethodVerifier::GetDexGcMap(MethodReference ref) {
+ DCHECK(Runtime::Current()->IsCompiler());
ReaderMutexLock mu(Thread::Current(), *dex_gc_maps_lock_);
DexGcMapTable::const_iterator it = dex_gc_maps_->find(ref);
if (it == dex_gc_maps_->end()) {
@@ -3959,6 +4099,7 @@
void MethodVerifier::SetDevirtMap(MethodReference ref,
const PcToConcreteMethodMap* devirt_map) {
+ DCHECK(Runtime::Current()->IsCompiler());
WriterMutexLock mu(Thread::Current(), *devirt_maps_lock_);
DevirtualizationMapTable::iterator it = devirt_maps_->find(ref);
if (it != devirt_maps_->end()) {
@@ -3967,11 +4108,12 @@
}
devirt_maps_->Put(ref, devirt_map);
- CHECK(devirt_maps_->find(ref) != devirt_maps_->end());
+ DCHECK(devirt_maps_->find(ref) != devirt_maps_->end());
}
const MethodReference* MethodVerifier::GetDevirtMap(const MethodReference& ref,
uint32_t dex_pc) {
+ DCHECK(Runtime::Current()->IsCompiler());
ReaderMutexLock mu(Thread::Current(), *devirt_maps_lock_);
DevirtualizationMapTable::const_iterator it = devirt_maps_->find(ref);
if (it == devirt_maps_->end()) {
@@ -3979,7 +4121,8 @@
}
// Look up the PC in the map, get the concrete method to execute and return its reference.
- MethodVerifier::PcToConcreteMethodMap::const_iterator pc_to_concrete_method = it->second->find(dex_pc);
+ MethodVerifier::PcToConcreteMethodMap::const_iterator pc_to_concrete_method
+ = it->second->find(dex_pc);
if (pc_to_concrete_method != it->second->end()) {
return &(pc_to_concrete_method->second);
} else {
@@ -4031,6 +4174,24 @@
return result;
}
+bool MethodVerifier::IsCandidateForCompilation(const DexFile::CodeItem* code_item,
+ const uint32_t access_flags) {
+ // Don't compile class initializers, ever.
+ if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) {
+ return false;
+ }
+
+ const Runtime* runtime = Runtime::Current();
+ if (runtime->IsSmallMode() && runtime->UseCompileTimeClassPath()) {
+ // In Small mode, we only compile small methods.
+ const uint32_t code_size = code_item->insns_size_in_code_units_;
+ return (code_size < runtime->GetSmallModeMethodDexSizeLimit());
+ } else {
+ // In normal mode, we compile everything.
+ return true;
+ }
+}
+
ReaderWriterMutex* MethodVerifier::dex_gc_maps_lock_ = NULL;
MethodVerifier::DexGcMapTable* MethodVerifier::dex_gc_maps_ = NULL;
@@ -4044,65 +4205,79 @@
MethodVerifier::RejectedClassesTable* MethodVerifier::rejected_classes_ = NULL;
void MethodVerifier::Init() {
- dex_gc_maps_lock_ = new ReaderWriterMutex("verifier GC maps lock");
- Thread* self = Thread::Current();
- {
- WriterMutexLock mu(self, *dex_gc_maps_lock_);
- dex_gc_maps_ = new MethodVerifier::DexGcMapTable;
- }
+ if (Runtime::Current()->IsCompiler()) {
+ dex_gc_maps_lock_ = new ReaderWriterMutex("verifier GC maps lock");
+ Thread* self = Thread::Current();
+ {
+ WriterMutexLock mu(self, *dex_gc_maps_lock_);
+ dex_gc_maps_ = new MethodVerifier::DexGcMapTable;
+ }
- safecast_map_lock_ = new Mutex("verifier Cast Elision lock");
- {
- MutexLock mu(self, *safecast_map_lock_);
- safecast_map_ = new MethodVerifier::SafeCastMap();
- }
+ safecast_map_lock_ = new Mutex("verifier Cast Elision lock");
+ {
+ MutexLock mu(self, *safecast_map_lock_);
+ safecast_map_ = new MethodVerifier::SafeCastMap();
+ }
- devirt_maps_lock_ = new ReaderWriterMutex("verifier Devirtualization lock");
+ devirt_maps_lock_ = new ReaderWriterMutex("verifier Devirtualization lock");
- {
- WriterMutexLock mu(self, *devirt_maps_lock_);
- devirt_maps_ = new MethodVerifier::DevirtualizationMapTable();
- }
+ {
+ WriterMutexLock mu(self, *devirt_maps_lock_);
+ devirt_maps_ = new MethodVerifier::DevirtualizationMapTable();
+ }
- rejected_classes_lock_ = new Mutex("verifier rejected classes lock");
- {
- MutexLock mu(self, *rejected_classes_lock_);
- rejected_classes_ = new MethodVerifier::RejectedClassesTable;
+ rejected_classes_lock_ = new Mutex("verifier rejected classes lock");
+ {
+ MutexLock mu(self, *rejected_classes_lock_);
+ rejected_classes_ = new MethodVerifier::RejectedClassesTable;
+ }
}
art::verifier::RegTypeCache::Init();
}
void MethodVerifier::Shutdown() {
- Thread* self = Thread::Current();
- {
- WriterMutexLock mu(self, *dex_gc_maps_lock_);
- STLDeleteValues(dex_gc_maps_);
- delete dex_gc_maps_;
- dex_gc_maps_ = NULL;
- }
- delete dex_gc_maps_lock_;
- dex_gc_maps_lock_ = NULL;
+ if (Runtime::Current()->IsCompiler()) {
+ Thread* self = Thread::Current();
+ {
+ WriterMutexLock mu(self, *dex_gc_maps_lock_);
+ STLDeleteValues(dex_gc_maps_);
+ delete dex_gc_maps_;
+ dex_gc_maps_ = NULL;
+ }
+ delete dex_gc_maps_lock_;
+ dex_gc_maps_lock_ = NULL;
- {
- WriterMutexLock mu(self, *devirt_maps_lock_);
- STLDeleteValues(devirt_maps_);
- delete devirt_maps_;
- devirt_maps_ = NULL;
- }
- delete devirt_maps_lock_;
- devirt_maps_lock_ = NULL;
+ {
+ MutexLock mu(self, *safecast_map_lock_);
+ STLDeleteValues(safecast_map_);
+ delete safecast_map_;
+ safecast_map_ = NULL;
+ }
+ delete safecast_map_lock_;
+ safecast_map_lock_ = NULL;
- {
- MutexLock mu(self, *rejected_classes_lock_);
- delete rejected_classes_;
- rejected_classes_ = NULL;
+ {
+ WriterMutexLock mu(self, *devirt_maps_lock_);
+ STLDeleteValues(devirt_maps_);
+ delete devirt_maps_;
+ devirt_maps_ = NULL;
+ }
+ delete devirt_maps_lock_;
+ devirt_maps_lock_ = NULL;
+
+ {
+ MutexLock mu(self, *rejected_classes_lock_);
+ delete rejected_classes_;
+ rejected_classes_ = NULL;
+ }
+ delete rejected_classes_lock_;
+ rejected_classes_lock_ = NULL;
}
- delete rejected_classes_lock_;
- rejected_classes_lock_ = NULL;
verifier::RegTypeCache::ShutDown();
}
void MethodVerifier::AddRejectedClass(ClassReference ref) {
+ DCHECK(Runtime::Current()->IsCompiler());
{
MutexLock mu(Thread::Current(), *rejected_classes_lock_);
rejected_classes_->insert(ref);
@@ -4111,6 +4286,7 @@
}
bool MethodVerifier::IsClassRejected(ClassReference ref) {
+ DCHECK(Runtime::Current()->IsCompiler());
MutexLock mu(Thread::Current(), *rejected_classes_lock_);
return (rejected_classes_->find(ref) != rejected_classes_->end());
}
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index e1bcbb1..3f98a00 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -237,6 +237,9 @@
// Describe VRegs at the given dex pc.
std::vector<int32_t> DescribeVRegs(uint32_t dex_pc);
+ static bool IsCandidateForCompilation(const DexFile::CodeItem* code_item,
+ const uint32_t access_flags);
+
private:
// Adds the given string to the beginning of the last failure message.
void PrependToLastFailMessage(std::string);
@@ -654,7 +657,7 @@
LOCKS_EXCLUDED(devirt_maps_lock_);
typedef std::set<ClassReference> RejectedClassesTable;
static Mutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- static RejectedClassesTable* rejected_classes_;
+ static RejectedClassesTable* rejected_classes_ GUARDED_BY(rejected_classes_lock_);
static void AddRejectedClass(ClassReference ref)
LOCKS_EXCLUDED(rejected_classes_lock_);
@@ -717,6 +720,13 @@
// Converts soft failures to hard failures when false. Only false when the compiler isn't
// running and the verifier is called from the class linker.
const bool allow_soft_failures_;
+
+ // Indicates if the method being verified contains at least one check-cast instruction.
+ bool has_check_casts_;
+
+ // Indicates if the method being verified contains at least one invoke-virtual/range
+ // or invoke-interface/range.
+ bool has_virtual_or_interface_invokes_;
};
std::ostream& operator<<(std::ostream& os, const MethodVerifier::FailureKind& rhs);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index d2abaac..7965c06 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -167,7 +167,7 @@
DCHECK(uninit_type.IsUninitializedTypes());
const RegType& init_type = verifier_->GetRegTypeCache()->FromUninitialized(uninit_type);
size_t changed = 0;
- for (size_t i = 0; i < num_regs_; i++) {
+ for (uint32_t i = 0; i < num_regs_; i++) {
if (GetRegisterType(i).Equals(uninit_type)) {
line_[i] = init_type.GetId();
changed++;
@@ -176,6 +176,31 @@
DCHECK_GT(changed, 0u);
}
+void RegisterLine::MarkAllRegistersAsConflicts() {
+ uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId();
+ for (uint32_t i = 0; i < num_regs_; i++) {
+ line_[i] = conflict_type_id;
+ }
+}
+
+void RegisterLine::MarkAllRegistersAsConflictsExcept(uint32_t vsrc) {
+ uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId();
+ for (uint32_t i = 0; i < num_regs_; i++) {
+ if (i != vsrc) {
+ line_[i] = conflict_type_id;
+ }
+ }
+}
+
+void RegisterLine::MarkAllRegistersAsConflictsExceptWide(uint32_t vsrc) {
+ uint16_t conflict_type_id = verifier_->GetRegTypeCache()->Conflict().GetId();
+ for (uint32_t i = 0; i < num_regs_; i++) {
+ if ((i != vsrc) && (i != (vsrc + 1))) {
+ line_[i] = conflict_type_id;
+ }
+ }
+}
+
std::string RegisterLine::Dump() const {
std::string result;
for (size_t i = 0; i < num_regs_; i++) {
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index cde7b9b..f380877 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -141,6 +141,13 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*
+ * Update all registers to be Conflict (except vsrc, where applicable).
+ */
+ void MarkAllRegistersAsConflicts();
+ void MarkAllRegistersAsConflictsExcept(uint32_t vsrc);
+ void MarkAllRegistersAsConflictsExceptWide(uint32_t vsrc);
+
+ /*
* Check constraints on constructor return. Specifically, make sure that the "this" argument got
* initialized.
* The "this" argument to <init> uses code offset kUninitThisArgAddr, which puts it at the start
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 10ca563..3b5d80d 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -103,7 +103,9 @@
// 0024: move-object v3, v2
// 0025: goto 0013
// Detaled dex instructions for ReferenceMap.java are at the end of this function.
- CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1); // v8: this, v3: y, v2: y, v1: x
+ // CHECK_REGS_CONTAIN_REFS(8, 3, 2, 1); // v8: this, v3: y, v2: y, v1: x
+ // We eliminate the non-live registers at a return, so only v3 is live:
+ CHECK_REGS_CONTAIN_REFS(3); // v3: y
ref_bitmap = map.FindBitMap(m->NativePcOffset(m->ToFirstNativeSafepointPc(0x18U)));
CHECK(ref_bitmap);
@@ -188,7 +190,7 @@
// 0:[Unknown],1:[Reference: java.lang.Object[]],2:[Zero],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Unknown],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
// |0010: +invoke-virtual-quick {v8, v7}, [000c] // vtable #000c
-// 0:[Conflict],1:[Reference: java.lang.Object[]],2:[Reference: java.lang.Object],3:[Reference: java.lang.Object],4:[32-bit Constant: 2],5:[Conflict],6:[32-bit Constant: 1],7:[Zero],8:[Reference: ReferenceMap],
+// 0:[Conflict],1:[Conflict],2:[Conflict],3:[Reference: java.lang.Object],4:[Conflict],5:[Conflict],6:[Conflict],7:[Conflict],8:[Conflict],
// |0013: return-object v3
// |0014: move-exception v0