Merge "Delegate long-to-float type conversions to the runtime on ARM."
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6c8098b..4792734 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -3367,6 +3367,9 @@
return needs_type_check_;
}
+ // Can throw ArrayStoreException.
+ bool CanThrow() const OVERRIDE { return needs_type_check_; }
+
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
UNUSED(obj);
// TODO: Same as for ArrayGet.
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index 2af636e..665d2a3 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -123,4 +123,18 @@
END \name
.endm
+// Macros to poison (negate) the reference for heap poisoning.
+.macro POISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ rsb \rRef, \rRef, #0
+#endif // USE_HEAP_POISONING
+.endm
+
+// Macros to unpoison (negate) the reference for heap poisoning.
+.macro UNPOISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ rsb \rRef, \rRef, #0
+#endif // USE_HEAP_POISONING
+.endm
+
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 2e7f34e..b06d2ca 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -616,12 +616,16 @@
ENTRY art_quick_aput_obj
cbz r2, .Ldo_aput_null
ldr r3, [r0, #MIRROR_OBJECT_CLASS_OFFSET]
+ UNPOISON_HEAP_REF r3
ldr ip, [r2, #MIRROR_OBJECT_CLASS_OFFSET]
+ UNPOISON_HEAP_REF ip
ldr r3, [r3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]
+ UNPOISON_HEAP_REF r3
cmp r3, ip @ value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
+ POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #7
@@ -649,6 +653,7 @@
.cfi_restore lr
.cfi_adjust_cfa_offset -16
add r3, r0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
+ POISON_HEAP_REF r2
str r2, [r3, r1, lsl #2]
ldr r3, [r9, #THREAD_CARD_TABLE_OFFSET]
lsr r0, r0, #7
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index 051f40b..bcf55e3 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -55,4 +55,18 @@
END \name
.endm
+// Macros to poison (negate) the reference for heap poisoning.
+.macro POISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ neg \rRef, \rRef
+#endif // USE_HEAP_POISONING
+.endm
+
+// Macros to unpoison (negate) the reference for heap poisoning.
+.macro UNPOISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ neg \rRef, \rRef
+#endif // USE_HEAP_POISONING
+.endm
+
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_S_
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 790158e..78d3116 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1146,15 +1146,19 @@
cbz x2, .Ldo_aput_null
ldr w3, [x0, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b
// This also zero-extends to x3
+ UNPOISON_HEAP_REF w3
ldr w4, [x2, #MIRROR_OBJECT_CLASS_OFFSET] // Heap reference = 32b
// This also zero-extends to x4
+ UNPOISON_HEAP_REF w4
ldr w3, [x3, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET] // Heap reference = 32b
// This also zero-extends to x3
+ UNPOISON_HEAP_REF w3
cmp w3, w4 // value's type == array's component type - trivial assignability
bne .Lcheck_assignability
.Ldo_aput:
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
// "Compress" = do nothing
+ POISON_HEAP_REF w2
str w2, [x3, x1, lsl #2] // Heap reference = 32b
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
lsr x0, x0, #7
@@ -1194,6 +1198,7 @@
add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
// "Compress" = do nothing
+ POISON_HEAP_REF w2
str w2, [x3, x1, lsl #2] // Heap reference = 32b
ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
lsr x0, x0, #7
diff --git a/runtime/arch/mips/asm_support_mips.S b/runtime/arch/mips/asm_support_mips.S
index eea6537..51e224c 100644
--- a/runtime/arch/mips/asm_support_mips.S
+++ b/runtime/arch/mips/asm_support_mips.S
@@ -115,5 +115,18 @@
#endif /* mips_isa_rev */
+// Macros to poison (negate) the reference for heap poisoning.
+.macro POISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ subu \rRef, $zero, \rRef
+#endif // USE_HEAP_POISONING
+.endm
+
+// Macros to unpoison (negate) the reference for heap poisoning.
+.macro UNPOISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ subu \rRef, $zero, \rRef
+#endif // USE_HEAP_POISONING
+.endm
#endif // ART_RUNTIME_ARCH_MIPS_ASM_SUPPORT_MIPS_S_
diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S
index 9e1dab6..3a0ea64 100644
--- a/runtime/arch/mips/quick_entrypoints_mips.S
+++ b/runtime/arch/mips/quick_entrypoints_mips.S
@@ -650,13 +650,17 @@
beqz $a2, .Ldo_aput_null
nop
lw $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
+ UNPOISON_HEAP_REF $t0
lw $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
+ UNPOISON_HEAP_REF $t1
lw $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
+ UNPOISON_HEAP_REF $t0
bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
nop
.Ldo_aput:
sll $a1, $a1, 2
add $t0, $a0, $a1
+ POISON_HEAP_REF $a2
sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
lw $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
srl $t1, $a0, 7
diff --git a/runtime/arch/mips64/asm_support_mips64.S b/runtime/arch/mips64/asm_support_mips64.S
index 2613777..b859c70 100644
--- a/runtime/arch/mips64/asm_support_mips64.S
+++ b/runtime/arch/mips64/asm_support_mips64.S
@@ -69,5 +69,18 @@
END \name
.endm
+// Macros to poison (negate) the reference for heap poisoning.
+.macro POISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ subu \rRef, $zero, \rRef
+#endif // USE_HEAP_POISONING
+.endm
+
+// Macros to unpoison (negate) the reference for heap poisoning.
+.macro UNPOISON_HEAP_REF rRef
+#ifdef USE_HEAP_POISONING
+ subu \rRef, $zero, \rRef
+#endif // USE_HEAP_POISONING
+.endm
#endif // ART_RUNTIME_ARCH_MIPS64_ASM_SUPPORT_MIPS64_S_
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index c62e035..b2cd7f2 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -649,7 +649,7 @@
daddiu $t0, $a5, 1 # t0 = shorty[1] (skip 1 for return type)
daddiu $t1, $a1, 4 # t1 = ptr to arg_array[4] (skip this ptr)
daddiu $t2, $a2, -4 # t2 = number of argument bytes remain (skip this ptr)
- daddiu $v0, $sp, 8 # v0 points to where to copy arg_array
+ daddiu $v0, $sp, 12 # v0 points to where to copy arg_array
LOOP_OVER_SHORTY_LOADING_REG a2, f14, call_fn
LOOP_OVER_SHORTY_LOADING_REG a3, f15, call_fn
LOOP_OVER_SHORTY_LOADING_REG a4, f16, call_fn
@@ -912,13 +912,17 @@
beq $a2, $zero, .Ldo_aput_null
nop
lwu $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
+ UNPOISON_HEAP_REF $t0
lwu $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
+ UNPOISON_HEAP_REF $t1
lwu $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
+ UNPOISON_HEAP_REF $t0
bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
nop
.Ldo_aput:
dsll $a1, $a1, 2
daddu $t0, $a0, $a1
+ POISON_HEAP_REF $a2
sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
ld $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
dsrl $t1, $a0, 7
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 62a6962..05b42f5 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1124,7 +1124,7 @@
TEST_F(StubTest, APutObj) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1258,7 +1258,7 @@
}
TEST_F(StubTest, AllocObject) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1385,7 +1385,7 @@
}
TEST_F(StubTest, AllocObjectArray) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
@@ -1474,7 +1474,7 @@
TEST_F(StubTest, StringCompareTo) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
// TODO: Check the "Unresolved" allocation stubs
@@ -2152,7 +2152,7 @@
}
TEST_F(StubTest, Fields8) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
Thread* self = Thread::Current();
@@ -2166,7 +2166,7 @@
}
TEST_F(StubTest, Fields16) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
Thread* self = Thread::Current();
@@ -2180,7 +2180,7 @@
}
TEST_F(StubTest, Fields32) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
Thread* self = Thread::Current();
@@ -2193,7 +2193,7 @@
}
TEST_F(StubTest, FieldsObj) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
Thread* self = Thread::Current();
@@ -2206,7 +2206,7 @@
}
TEST_F(StubTest, Fields64) {
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
Thread* self = Thread::Current();
@@ -2221,7 +2221,7 @@
TEST_F(StubTest, IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
Thread* self = Thread::Current();
@@ -2342,7 +2342,7 @@
TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__)
- TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+ TEST_DISABLED_FOR_READ_BARRIER();
Thread* self = Thread::Current();
ScopedObjectAccess soa(self);
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 122428b..2159f0e 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -179,4 +179,18 @@
#endif
END_MACRO
+// Macros to poison (negate) the reference for heap poisoning.
+MACRO1(POISON_HEAP_REF, rRef)
+#ifdef USE_HEAP_POISONING
+ neg REG_VAR(rRef, 0)
+#endif // USE_HEAP_POISONING
+END_MACRO
+
+// Macros to unpoison (negate) the reference for heap poisoning.
+MACRO1(UNPOISON_HEAP_REF, rRef)
+#ifdef USE_HEAP_POISONING
+ neg REG_VAR(rRef, 0)
+#endif // USE_HEAP_POISONING
+END_MACRO
+
#endif // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index e0397cc..44b67ca 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -33,6 +33,7 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1)
movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1)
// Push save all callee-save method.
+ THIS_LOAD_REQUIRES_READ_BARRIER
pushl RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
@@ -59,6 +60,7 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1)
movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1)
// Push save all callee-save method.
+ THIS_LOAD_REQUIRES_READ_BARRIER
pushl RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the top quick frame.
@@ -104,6 +106,7 @@
movl SYMBOL(_ZN3art7Runtime9instance_E)@GOT(REG_VAR(got_reg, 0)), REG_VAR(temp_reg, 1)
movl (REG_VAR(temp_reg, 1)), REG_VAR(temp_reg, 1)
// Push save all callee-save method.
+ THIS_LOAD_REQUIRES_READ_BARRIER
pushl RUNTIME_REFS_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET(REG_VAR(temp_reg, 1))
CFI_ADJUST_CFA_OFFSET(4)
// Store esp as the stop quick frame.
@@ -1142,11 +1145,22 @@
test %edx, %edx // store of null
jz .Ldo_aput_null
movl MIRROR_OBJECT_CLASS_OFFSET(%eax), %ebx
+ UNPOISON_HEAP_REF ebx
movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ebx), %ebx
+ UNPOISON_HEAP_REF ebx
// value's type == array's component type - trivial assignability
+#ifdef USE_HEAP_POISONING
+ PUSH eax // save eax
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax
+ UNPOISON_HEAP_REF eax
+ cmpl %eax, %ebx
+ POP eax // restore eax
+#else
cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ebx
+#endif
jne .Lcheck_assignability
.Ldo_aput:
+ POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4)
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(7), %eax
@@ -1161,7 +1175,13 @@
PUSH edx
subl LITERAL(8), %esp // alignment padding
CFI_ADJUST_CFA_OFFSET(8)
+#ifdef USE_HEAP_POISONING
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // pass arg2 - type of the value to be stored
+ UNPOISON_HEAP_REF eax
+ PUSH eax
+#else
pushl MIRROR_OBJECT_CLASS_OFFSET(%edx) // pass arg2 - type of the value to be stored
+#endif
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass arg1 - component type of the array
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
@@ -1172,6 +1192,7 @@
POP edx
POP ecx
POP eax
+ POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%eax, %ecx, 4) // do the aput
movl %fs:THREAD_CARD_TABLE_OFFSET, %edx
shrl LITERAL(7), %eax
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 5964314..b2b6c2d 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -170,4 +170,18 @@
int3
END_MACRO
+// Macros to poison (negate) the reference for heap poisoning.
+MACRO1(POISON_HEAP_REF, rRef)
+#ifdef USE_HEAP_POISONING
+ negl REG_VAR(rRef, 0)
+#endif // USE_HEAP_POISONING
+END_MACRO
+
+// Macros to unpoison (negate) the reference for heap poisoning.
+MACRO1(UNPOISON_HEAP_REF, rRef)
+#ifdef USE_HEAP_POISONING
+ negl REG_VAR(rRef, 0)
+#endif // USE_HEAP_POISONING
+END_MACRO
+
#endif // ART_RUNTIME_ARCH_X86_64_ASM_SUPPORT_X86_64_S_
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 48f59f3..66dfe5a 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -919,8 +919,10 @@
// RDI: uint32_t type_idx, RSI: ArtMethod*
// RDX, RCX, R8, R9: free. RAX: return val.
movl ART_METHOD_DEX_CACHE_TYPES_OFFSET(%rsi), %edx // Load dex cache resolved types array
+ UNPOISON_HEAP_REF edx
// Load the class
movl MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdx, %rdi, MIRROR_OBJECT_ARRAY_COMPONENT_SIZE), %edx
+ UNPOISON_HEAP_REF edx
testl %edx, %edx // Check null class
jz .Lart_quick_alloc_object_tlab_slow_path
// Check class status.
@@ -1189,12 +1191,21 @@
jz .Ldo_aput_null
movl MIRROR_OBJECT_CLASS_OFFSET(%edi), %ecx
// movq MIRROR_OBJECT_CLASS_OFFSET(%rdi), %rcx
+ UNPOISON_HEAP_REF ecx
movl MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%ecx), %ecx
// movq MIRROR_CLASS_COMPONENT_TYPE_OFFSET(%rcx), %rcx
+ UNPOISON_HEAP_REF ecx
+#ifdef USE_HEAP_POISONING
+ movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %eax // rax is free.
+ UNPOISON_HEAP_REF eax
+ cmpl %eax, %ecx // value's type == array's component type - trivial assignability
+#else
cmpl MIRROR_OBJECT_CLASS_OFFSET(%edx), %ecx // value's type == array's component type - trivial assignability
// cmpq MIRROR_CLASS_OFFSET(%rdx), %rcx
+#endif
jne .Lcheck_assignability
.Ldo_aput:
+ POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
@@ -1217,6 +1228,7 @@
// "Uncompress" = do nothing, as already zero-extended on load.
movl MIRROR_OBJECT_CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
+ UNPOISON_HEAP_REF esi
movq %rcx, %rdi // Pass arg1 = array's component type.
call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
@@ -1233,6 +1245,7 @@
POP rsi
POP rdi
+ POISON_HEAP_REF edx
movl %edx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%edi, %esi, 4)
// movq %rdx, MIRROR_OBJECT_ARRAY_DATA_OFFSET(%rdi, %rsi, 4)
movq %gs:THREAD_CARD_TABLE_OFFSET, %rdx
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4266c4a..431ef27 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -5027,6 +5027,13 @@
}
}
}
+ // Fix up IMT in case it has any miranda methods in it.
+ for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+ auto it = move_table.find(out_imt[i]);
+ if (it != move_table.end()) {
+ out_imt[i] = it->second;
+ }
+ }
// Check that there are no stale methods are in the dex cache array.
if (kIsDebugBuild) {
auto* resolved_methods = klass->GetDexCache()->GetResolvedMethods();
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 34fdd8d..0987c00 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -181,6 +181,13 @@
return; \
}
+// TODO: When read barrier works with the compiler, get rid of this.
+#define TEST_DISABLED_FOR_READ_BARRIER() \
+ if (kUseReadBarrier) { \
+ printf("WARNING: TEST DISABLED FOR READ BARRIER\n"); \
+ return; \
+ }
+
#define TEST_DISABLED_FOR_MIPS() \
if (kRuntimeISA == kMips) { \
printf("WARNING: TEST DISABLED FOR MIPS\n"); \
diff --git a/runtime/globals.h b/runtime/globals.h
index fe699c6..d70f3ab1 100644
--- a/runtime/globals.h
+++ b/runtime/globals.h
@@ -97,7 +97,7 @@
kUseTableLookupReadBarrier;
// If true, references within the heap are poisoned (negated).
-#ifdef ART_HEAP_POISONING
+#ifdef USE_HEAP_POISONING
static constexpr bool kPoisonHeapReferences = true;
#else
static constexpr bool kPoisonHeapReferences = false;
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 88a72ec..917fe43 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1214,7 +1214,7 @@
// comment in Heap::VisitObjects().
heap->IncrementDisableMovingGC(self);
}
- Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
+ Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__, true /* long suspend */);
Hprof hprof(filename, fd, direct_to_ddms);
hprof.Dump();
Runtime::Current()->GetThreadList()->ResumeAll();
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 0ef58ea..20e4222 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -181,7 +181,9 @@
auto* env = self->GetJniEnv();
DCHECK(env != nullptr);
if (env->check_jni) {
- LOG(WARNING) << "Attempt to remove local handle scope entry from IRT, ignoring";
+ ScopedObjectAccess soa(self);
+ LOG(WARNING) << "Attempt to remove non-JNI local reference, dumping thread";
+ self->Dump(LOG(WARNING));
}
return true;
}
diff --git a/runtime/read_barrier_c.h b/runtime/read_barrier_c.h
index 88bda3a..4f408dd 100644
--- a/runtime/read_barrier_c.h
+++ b/runtime/read_barrier_c.h
@@ -31,6 +31,10 @@
// #define USE_TABLE_LOOKUP_READ_BARRIER
#endif
+#ifdef ART_HEAP_POISONING
+#define USE_HEAP_POISONING
+#endif
+
#if defined(USE_BAKER_READ_BARRIER) || defined(USE_BROOKS_READ_BARRIER)
#define USE_BAKER_OR_BROOKS_READ_BARRIER
#endif
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 7719bb8..af9ba68 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -52,7 +52,7 @@
ThreadList::ThreadList()
: suspend_all_count_(0), debug_suspend_all_count_(0), unregistering_count_(0),
- suspend_all_historam_("suspend all histogram", 16, 64) {
+ suspend_all_historam_("suspend all histogram", 16, 64), long_suspend_(false) {
CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}
@@ -448,7 +448,7 @@
return runnable_threads.size() + other_threads.size() + 1; // +1 for self.
}
-void ThreadList::SuspendAll(const char* cause) {
+void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
Thread* self = Thread::Current();
if (self != nullptr) {
@@ -482,14 +482,22 @@
// Block on the mutator lock until all Runnable threads release their share of access.
#if HAVE_TIMED_RWLOCK
- // Timeout if we wait more than 30 seconds.
- if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) {
- UnsafeLogFatalForThreadSuspendAllTimeout();
+ while (true) {
+ if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) {
+ break;
+ } else if (!long_suspend_) {
+      // Reading long_suspend_ without the mutator lock is slightly racy; in some rare cases this
+      // could result in a thread suspend timeout.
+      // Timeout if we wait more than kThreadSuspendTimeoutMs milliseconds.
+ UnsafeLogFatalForThreadSuspendAllTimeout();
+ }
}
#else
Locks::mutator_lock_->ExclusiveLock(self);
#endif
+ long_suspend_ = long_suspend;
+
const uint64_t end_time = NanoTime();
const uint64_t suspend_time = end_time - start_time;
suspend_all_historam_.AdjustAndAddValue(suspend_time);
@@ -529,6 +537,8 @@
AssertThreadsAreSuspended(self, self);
}
+ long_suspend_ = false;
+
Locks::mutator_lock_->ExclusiveUnlock(self);
{
MutexLock mu(self, *Locks::thread_list_lock_);
@@ -599,8 +609,8 @@
jobject peer) {
JNIEnvExt* env = self->GetJniEnv();
ScopedLocalRef<jstring>
- scoped_name_string(env, (jstring)env->GetObjectField(peer,
- WellKnownClasses::java_lang_Thread_name));
+ scoped_name_string(env, (jstring)env->GetObjectField(
+ peer, WellKnownClasses::java_lang_Thread_name));
ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
if (scoped_name_chars.c_str() == nullptr) {
LOG(severity) << message << ": " << peer;
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 0f094cc..2c1f813 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -61,7 +61,9 @@
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);
// Suspends all threads and gets exclusive access to the mutator_lock_.
- void SuspendAll(const char* cause)
+  // If long suspend is true, then other threads who try to suspend will never time out. Long
+  // suspend is currently used for hprof since large heaps take a long time.
+ void SuspendAll(const char* cause, bool long_suspend = false)
EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
@@ -184,6 +186,9 @@
// by mutator lock ensures no thread can read when another thread is modifying it.
Histogram<uint64_t> suspend_all_historam_ GUARDED_BY(Locks::mutator_lock_);
+ // Whether or not the current thread suspension is long.
+ bool long_suspend_;
+
friend class Thread;
DISALLOW_COPY_AND_ASSIGN(ThreadList);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index d3b3af8..5a43b56 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -46,48 +46,8 @@
namespace art {
-// File format:
-// header
-// record 0
-// record 1
-// ...
-//
-// Header format:
-// u4 magic ('SLOW')
-// u2 version
-// u2 offset to data
-// u8 start date/time in usec
-// u2 record size in bytes (version >= 2 only)
-// ... padding to 32 bytes
-//
-// Record format v1:
-// u1 thread ID
-// u4 method ID | method action
-// u4 time delta since start, in usec
-//
-// Record format v2:
-// u2 thread ID
-// u4 method ID | method action
-// u4 time delta since start, in usec
-//
-// Record format v3:
-// u2 thread ID
-// u4 method ID | method action
-// u4 time delta since start, in usec
-// u4 wall time since start, in usec (when clock == "dual" only)
-//
-// 32 bits of microseconds is 70 minutes.
-//
-// All values are stored in little-endian order.
-
-enum TraceAction {
- kTraceMethodEnter = 0x00, // method entry
- kTraceMethodExit = 0x01, // method exit
- kTraceUnroll = 0x02, // method exited by exception unrolling
- // 0x03 currently unused
- kTraceMethodActionMask = 0x03, // two bits
-};
-
+static constexpr size_t TraceActionBits = MinimumBitsToStore(
+ static_cast<size_t>(kTraceMethodActionMask));
static constexpr uint8_t kOpNewMethod = 1U;
static constexpr uint8_t kOpNewThread = 2U;
@@ -120,8 +80,8 @@
static const uint32_t kTraceMagicValue = 0x574f4c53;
static const uint16_t kTraceVersionSingleClock = 2;
static const uint16_t kTraceVersionDualClock = 3;
-static const uint16_t kTraceRecordSizeSingleClock = 14; // using v2
-static const uint16_t kTraceRecordSizeDualClock = 18; // using v3 with two timestamps
+static const uint16_t kTraceRecordSizeSingleClock = 10; // using v2
+static const uint16_t kTraceRecordSizeDualClock = 14; // using v3 with two timestamps
TraceClockSource Trace::default_clock_source_ = kDefaultTraceClockSource;
@@ -132,26 +92,40 @@
// The key identifying the tracer to update instrumentation.
static constexpr const char* kTracerInstrumentationKey = "Tracer";
-static ArtMethod* DecodeTraceMethodId(uint64_t tmid) {
- return reinterpret_cast<ArtMethod*>(tmid & ~kTraceMethodActionMask);
-}
-
static TraceAction DecodeTraceAction(uint32_t tmid) {
return static_cast<TraceAction>(tmid & kTraceMethodActionMask);
}
-static uint64_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action) {
- auto tmid = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(method)) | action;
- DCHECK_EQ(method, DecodeTraceMethodId(tmid));
+ArtMethod* Trace::DecodeTraceMethod(uint32_t tmid) {
+ MutexLock mu(Thread::Current(), *unique_methods_lock_);
+ return unique_methods_[tmid >> TraceActionBits];
+}
+
+uint32_t Trace::EncodeTraceMethod(ArtMethod* method) {
+ MutexLock mu(Thread::Current(), *unique_methods_lock_);
+ uint32_t idx;
+ auto it = art_method_id_map_.find(method);
+ if (it != art_method_id_map_.end()) {
+ idx = it->second;
+ } else {
+ unique_methods_.push_back(method);
+ idx = unique_methods_.size() - 1;
+ art_method_id_map_.emplace(method, idx);
+ }
+ DCHECK_LT(idx, unique_methods_.size());
+ DCHECK_EQ(unique_methods_[idx], method);
+ return idx;
+}
+
+uint32_t Trace::EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action) {
+ uint32_t tmid = (EncodeTraceMethod(method) << TraceActionBits) | action;
+ DCHECK_EQ(method, DecodeTraceMethod(tmid));
return tmid;
}
std::vector<ArtMethod*>* Trace::AllocStackTrace() {
- if (temp_stack_trace_.get() != nullptr) {
- return temp_stack_trace_.release();
- } else {
- return new std::vector<ArtMethod*>();
- }
+ return (temp_stack_trace_.get() != nullptr) ? temp_stack_trace_.release() :
+ new std::vector<ArtMethod*>();
}
void Trace::FreeStackTrace(std::vector<ArtMethod*>* stack_trace) {
@@ -272,24 +246,22 @@
if (old_stack_trace == nullptr) {
// If there's no previous stack trace sample for this thread, log an entry event for all
// methods in the trace.
- for (std::vector<ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
- rit != stack_trace->rend(); ++rit) {
+ for (auto rit = stack_trace->rbegin(); rit != stack_trace->rend(); ++rit) {
LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
thread_clock_diff, wall_clock_diff);
}
} else {
// If there's a previous stack trace for this thread, diff the traces and emit entry and exit
// events accordingly.
- std::vector<ArtMethod*>::reverse_iterator old_rit = old_stack_trace->rbegin();
- std::vector<ArtMethod*>::reverse_iterator rit = stack_trace->rbegin();
+ auto old_rit = old_stack_trace->rbegin();
+ auto rit = stack_trace->rbegin();
// Iterate bottom-up over both traces until there's a difference between them.
while (old_rit != old_stack_trace->rend() && rit != stack_trace->rend() && *old_rit == *rit) {
old_rit++;
rit++;
}
// Iterate top-down over the old trace until the point where they differ, emitting exit events.
- for (std::vector<ArtMethod*>::iterator old_it = old_stack_trace->begin();
- old_it != old_rit.base(); ++old_it) {
+ for (auto old_it = old_stack_trace->begin(); old_it != old_rit.base(); ++old_it) {
LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited,
thread_clock_diff, wall_clock_diff);
}
@@ -608,7 +580,8 @@
clock_source_(default_clock_source_),
buffer_size_(std::max(kMinBufSize, buffer_size)),
start_time_(MicroTime()), clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0),
- overflow_(false), interval_us_(0), streaming_lock_(nullptr) {
+ overflow_(false), interval_us_(0), streaming_lock_(nullptr),
+ unique_methods_lock_(new Mutex("unique methods lock")) {
uint16_t trace_version = GetTraceVersion(clock_source_);
if (output_mode == TraceOutputMode::kStreaming) {
trace_version |= 0xF0U;
@@ -647,14 +620,13 @@
return ret;
}
-static void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void Trace::DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source) {
uint8_t* ptr = buf + kTraceHeaderLength;
uint8_t* end = buf + buf_size;
while (ptr < end) {
- uint64_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
- ArtMethod* method = DecodeTraceMethodId(tmid);
+ uint32_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
+ ArtMethod* method = DecodeTraceMethod(tmid);
TraceAction action = DecodeTraceAction(tmid);
LOG(INFO) << PrettyMethod(method) << " " << static_cast<int>(action);
ptr += GetRecordSize(clock_source);
@@ -683,9 +655,7 @@
GetVisitedMethodsFromBitSets(seen_methods_, &visited_methods);
// Clean up.
- for (auto& e : seen_methods_) {
- delete e.second;
- }
+ STLDeleteValues(&seen_methods_);
} else {
final_offset = cur_offset_.LoadRelaxed();
GetVisitedMethods(final_offset, &visited_methods);
@@ -877,10 +847,10 @@
return false;
}
-static std::string GetMethodLine(ArtMethod* method)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+std::string Trace::GetMethodLine(ArtMethod* method) {
method = method->GetInterfaceMethodIfProxy(sizeof(void*));
- return StringPrintf("%p\t%s\t%s\t%s\t%s\n", method,
+ return StringPrintf("%p\t%s\t%s\t%s\t%s\n",
+ reinterpret_cast<void*>((EncodeTraceMethod(method) << TraceActionBits)),
PrettyDescriptor(method->GetDeclaringClassDescriptor()).c_str(), method->GetName(),
method->GetSignature().ToString().c_str(), method->GetDeclaringClassSourceFile());
}
@@ -945,11 +915,11 @@
UNIMPLEMENTED(FATAL) << "Unexpected event: " << event;
}
- uint64_t method_value = EncodeTraceMethodAndAction(method, action);
+ uint32_t method_value = EncodeTraceMethodAndAction(method, action);
// Write data
uint8_t* ptr;
- static constexpr size_t kPacketSize = 18U; // The maximum size of data in a packet.
+ static constexpr size_t kPacketSize = 14U; // The maximum size of data in a packet.
uint8_t stack_buf[kPacketSize]; // Space to store a packet when in streaming mode.
if (trace_output_mode_ == TraceOutputMode::kStreaming) {
ptr = stack_buf;
@@ -958,8 +928,8 @@
}
Append2LE(ptr, thread->GetTid());
- Append8LE(ptr + 2, method_value);
- ptr += 10;
+ Append4LE(ptr + 2, method_value);
+ ptr += 6;
if (UseThreadCpuClock()) {
Append4LE(ptr, thread_clock_diff);
@@ -968,7 +938,7 @@
if (UseWallClock()) {
Append4LE(ptr, wall_clock_diff);
}
- static_assert(kPacketSize == 2 + 8 + 4 + 4, "Packet size incorrect.");
+ static_assert(kPacketSize == 2 + 4 + 4 + 4, "Packet size incorrect.");
if (trace_output_mode_ == TraceOutputMode::kStreaming) {
MutexLock mu(Thread::Current(), *streaming_lock_); // To serialize writing.
@@ -1004,8 +974,8 @@
uint8_t* end = buf_.get() + buf_size;
while (ptr < end) {
- uint64_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
- ArtMethod* method = DecodeTraceMethodId(tmid);
+ uint32_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
+ ArtMethod* method = DecodeTraceMethod(tmid);
visited_methods->insert(method);
ptr += GetRecordSize(clock_source_);
}
diff --git a/runtime/trace.h b/runtime/trace.h
index 1539c06..7bc495a 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -23,6 +23,7 @@
#include <ostream>
#include <set>
#include <string>
+#include <unordered_map>
#include <vector>
#include "atomic.h"
@@ -51,6 +52,48 @@
kSampleProfilingActive,
};
+// File format:
+// header
+// record 0
+// record 1
+// ...
+//
+// Header format:
+// u4 magic ('SLOW')
+// u2 version
+// u2 offset to data
+// u8 start date/time in usec
+// u2 record size in bytes (version >= 2 only)
+// ... padding to 32 bytes
+//
+// Record format v1:
+// u1 thread ID
+// u4 method ID | method action
+// u4 time delta since start, in usec
+//
+// Record format v2:
+// u2 thread ID
+// u4 method ID | method action
+// u4 time delta since start, in usec
+//
+// Record format v3:
+// u2 thread ID
+// u4 method ID | method action
+// u4 time delta since start, in usec
+// u4 wall time since start, in usec (when clock == "dual" only)
+//
+// 32 bits of microseconds is 70 minutes.
+//
+// All values are stored in little-endian order.
+
+enum TraceAction {
+ kTraceMethodEnter = 0x00, // method entry
+ kTraceMethodExit = 0x01, // method exit
+ kTraceUnroll = 0x02, // method exited by exception unrolling
+ // 0x03 currently unused
+ kTraceMethodActionMask = 0x03, // two bits
+};
+
class Trace FINAL : public instrumentation::InstrumentationListener {
public:
enum TraceFlag {
@@ -173,6 +216,16 @@
void WriteToBuf(const uint8_t* src, size_t src_size)
EXCLUSIVE_LOCKS_REQUIRED(streaming_lock_);
+ uint32_t EncodeTraceMethod(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_);
+ uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action)
+ LOCKS_EXCLUDED(unique_methods_lock_);
+ ArtMethod* DecodeTraceMethod(uint32_t tmid) LOCKS_EXCLUDED(unique_methods_lock_);
+ std::string GetMethodLine(ArtMethod* method) LOCKS_EXCLUDED(unique_methods_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
// Singleton instance of the Trace or null when no method tracing is active.
static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
@@ -229,6 +282,12 @@
std::map<mirror::DexCache*, DexIndexBitSet*> seen_methods_;
std::unique_ptr<ThreadIDBitSet> seen_threads_;
+ // Bijective mapping between ArtMethod* and a compact method index:
+ // art_method_id_map_ maps each ArtMethod* to its index in unique_methods_.
+ Mutex* unique_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::unordered_map<ArtMethod*, uint32_t> art_method_id_map_ GUARDED_BY(unique_methods_lock_);
+ std::vector<ArtMethod*> unique_methods_ GUARDED_BY(unique_methods_lock_);
+
DISALLOW_COPY_AND_ASSIGN(Trace);
};
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 71a2b2d..ca256ec 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -258,6 +258,11 @@
return char_returns[c1];
}
+extern "C" JNIEXPORT void JNICALL Java_Main_removeLocalObject(JNIEnv* env, jclass, jclass o) {
+ // Delete the arg to see if it crashes.
+ env->DeleteLocalRef(o);
+}
+
extern "C" JNIEXPORT jboolean JNICALL Java_Main_nativeIsAssignableFrom(JNIEnv* env, jclass,
jclass from, jclass to) {
return env->IsAssignableFrom(from, to);
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index 584fae3..ac20417 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -34,6 +34,7 @@
testShallowGetStackClass2();
testCallNonvirtual();
testNewStringObject();
+ testRemoveLocalObject();
}
private static native void testFindClassOnAttachedNativeThread();
@@ -111,6 +112,12 @@
}
}
+ private static native void removeLocalObject(Object o);
+
+ private static void testRemoveLocalObject() {
+ removeLocalObject(new Object());
+ }
+
private static native short shortMethod(short s1, short s2, short s3, short s4, short s5, short s6, short s7,
short s8, short s9, short s10);
diff --git a/test/458-long-to-fpu/info.txt b/test/458-long-to-fpu/info.txt
index 7459cfb..cf51df5c 100644
--- a/test/458-long-to-fpu/info.txt
+++ b/test/458-long-to-fpu/info.txt
@@ -1,2 +1,2 @@
Regression test for x86's code generator, which had a bug in
-the long-to-float and long-to-double implementations.
+the long-to-float and long-to-double implementations.
diff --git a/test/458-long-to-fpu/src/Main.java b/test/458-long-to-fpu/src/Main.java
index a8b6e78..d1615dc 100644
--- a/test/458-long-to-fpu/src/Main.java
+++ b/test/458-long-to-fpu/src/Main.java
@@ -16,27 +16,30 @@
public class Main {
public static void main(String[] args) {
- System.out.println(floatConvert(false));
- System.out.println(doubleConvert(false));
+ System.out.println($noinline$FloatConvert(false));
+ System.out.println($noinline$DoubleConvert(false));
}
- public static long floatConvert(boolean flag) {
- if (flag) {
- // Try defeating inlining.
- floatConvert(false);
+ // A dummy value to defeat inlining of these routines.
+ static boolean doThrow = false;
+
+ public static long $noinline$FloatConvert(boolean flag) {
+ // Try defeating inlining.
+ if (doThrow) {
+ throw new Error();
}
long l = myLong;
myFloat = (float)l;
return l;
}
- public static long doubleConvert(boolean flag) {
- if (flag) {
- // Try defeating inlining.
- floatConvert(false);
+ public static long $noinline$DoubleConvert(boolean flag) {
+ // Try defeating inlining.
+ if (doThrow) {
+ throw new Error();
}
long l = myLong;
- myFloat = (float)l;
+ myDouble = (double)l;
return l;
}