riscv64: [codegen] Implement Baker read barriers.
Implement codegen changes and entrypoints for Baker read
barriers. Also implement the resolution and initialization
entrypoints and enable codegen for additional instructions
(class, string, method handle/type, field and array loads) to
allow stress-testing the Baker read barrier implementation.
Fix `CodeGeneratorRISCV64::Finalize()` to avoid finalizing
the code twice. This double-finalization bug was exposed by
enabling compilation of some larger methods.
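For reference, the fast path emitted by
`GenerateReferenceLoadWithBakerReadBarrier()` for a plain field load
mirrors the existing GC root barrier: load the reference, then test
the per-register mark entrypoint slot on the current thread and take
the slow path only while marking is active. A rough sketch (the
`t0`/`t1` registers and the slot name are illustrative, not the
exact emitted code):

    lwu   t1, OFFSET(t0)          # t0 = holder, t1 = loaded reference
                                  # (unpoisoned if heap poisoning is enabled)
    ld    ra, MARK_REG_T1(xSELF)  # entrypoint slot; null while marking is inactive
    bnez  ra, .Lmark              # slow path: art_quick_read_barrier_mark_regNN
.Ldone:
    ...
.Lmark:
    jalr  ra                      # returns the marked reference in t1; clobbers RA,
                                  # which is why RA doubles as the temp register
    j     .Ldone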
Test: # Edit `run-test` to disable checker, then
testrunner.py --target --64 --ndebug --optimizing
# Ignore 7 pre-existing failures.
Bug: 283082089
Change-Id: I55a128921b388fae1bf818bfbda0bcb18f6dbfb3
diff --git a/compiler/optimizing/code_generator_riscv64.cc b/compiler/optimizing/code_generator_riscv64.cc
index e617b65..7f23730 100644
--- a/compiler/optimizing/code_generator_riscv64.cc
+++ b/compiler/optimizing/code_generator_riscv64.cc
@@ -136,6 +136,17 @@
return static_cast<int64_t>(kShiftedStatusValue) - (INT64_C(1) << 32);
}
+int32_t ReadBarrierMarkEntrypointOffset(Location ref) {
+ DCHECK(ref.IsRegister());
+ int reg = ref.reg();
+ DCHECK(T0 <= reg && reg <= T6 && reg != TR) << reg;
+ // Note: Entrypoints for registers X30 (T5) and X31 (T6) are stored in entries
+ // for X0 (Zero) and X1 (RA) because these are not valid registers for marking
+ // and we currently have slots only up to register 29.
+ int entry_point_number = (reg >= 30) ? reg - 30 : reg;
+ return Thread::ReadBarrierMarkEntryPointsOffset<kRiscv64PointerSize>(entry_point_number);
+}
+
Location InvokeRuntimeCallingConvention::GetReturnLocation(DataType::Type return_type) {
return Riscv64ReturnLocation(return_type);
}
@@ -658,7 +669,7 @@
//
riscv64_codegen->ValidateInvokeRuntimeWithoutRecordingPcInfo(instruction_, this);
DCHECK_NE(entrypoint_.AsRegister<XRegister>(), TMP); // A taken branch can clobber `TMP`.
- __ Jalr(entrypoint_.AsRegister<XRegister>());
+ __ Jalr(entrypoint_.AsRegister<XRegister>()); // Clobbers `RA` (used as the `entrypoint_`).
__ J(GetExitLabel());
}
@@ -1178,16 +1189,13 @@
"have different sizes.");
// Slow path marking the GC root `root`.
- ScratchRegisterScope srs(GetAssembler());
- srs.ExcludeXRegister(TMP); // A taken branch can clobber `TMP`.
- XRegister tmp = srs.AllocateXRegister();
+ XRegister tmp = RA; // Use RA as temp. It is clobbered in the slow path anyway.
SlowPathCodeRISCV64* slow_path =
new (codegen_->GetScopedAllocator()) ReadBarrierMarkSlowPathRISCV64(
instruction, root, Location::RegisterLocation(tmp));
codegen_->AddSlowPath(slow_path);
- const int32_t entry_point_offset =
- Thread::ReadBarrierMarkEntryPointsOffset<kRiscv64PointerSize>(root.reg());
+ const int32_t entry_point_offset = ReadBarrierMarkEntrypointOffset(root);
// Loading the entrypoint does not require a load acquire since it is only changed when
// threads are suspended or running a checkpoint.
__ Loadd(tmp, TR, entry_point_offset);
@@ -1710,13 +1718,8 @@
uint32_t offset,
Location temp,
bool needs_null_check) {
- UNUSED(instruction);
- UNUSED(ref);
- UNUSED(obj);
- UNUSED(offset);
- UNUSED(temp);
- UNUSED(needs_null_check);
- LOG(FATAL) << "Unimplemented";
+ GenerateReferenceLoadWithBakerReadBarrier(
+ instruction, ref, obj, offset, /*index=*/ Location::NoLocation(), temp, needs_null_check);
}
void CodeGeneratorRISCV64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -1726,14 +1729,8 @@
Location index,
Location temp,
bool needs_null_check) {
- UNUSED(instruction);
- UNUSED(ref);
- UNUSED(obj);
- UNUSED(data_offset);
- UNUSED(index);
- UNUSED(temp);
- UNUSED(needs_null_check);
- LOG(FATAL) << "Unimplemented";
+ GenerateReferenceLoadWithBakerReadBarrier(
+ instruction, ref, obj, data_offset, index, temp, needs_null_check);
}
void CodeGeneratorRISCV64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
@@ -1741,20 +1738,43 @@
XRegister obj,
uint32_t offset,
Location index,
- ScaleFactor scale_factor,
Location temp,
- bool needs_null_check,
- bool always_update_field) {
- UNUSED(instruction);
- UNUSED(ref);
- UNUSED(obj);
- UNUSED(offset);
- UNUSED(index);
- UNUSED(scale_factor);
+ bool needs_null_check) {
+ // For now, use the same approach as for GC roots plus unpoison the reference if needed.
+ // TODO(riscv64): Implement checking if the holder is black.
UNUSED(temp);
- UNUSED(needs_null_check);
- UNUSED(always_update_field);
- LOG(FATAL) << "Unimplemented";
+
+ XRegister reg = ref.AsRegister<XRegister>();
+ if (index.IsValid()) {
+ DCHECK(instruction->IsArrayGet());
+ DCHECK(!needs_null_check);
+ DCHECK(index.IsRegister());
+ // /* HeapReference<Object> */ ref = *(obj + index * element_size + offset)
+ DataType::Type type = DataType::Type::kReference;
+ DCHECK_EQ(type, instruction->GetType());
+ instruction_visitor_.ShNAdd(reg, index.AsRegister<XRegister>(), obj, type);
+ __ Loadwu(reg, reg, offset);
+ } else {
+ // /* HeapReference<Object> */ ref = *(obj + offset)
+ __ Loadwu(reg, obj, offset);
+ if (needs_null_check) {
+ MaybeRecordImplicitNullCheck(instruction);
+ }
+ }
+ MaybeUnpoisonHeapReference(reg);
+
+ // Slow path marking the reference.
+ XRegister tmp = RA; // Use RA as temp. It is clobbered in the slow path anyway.
+ SlowPathCodeRISCV64* slow_path = new (GetScopedAllocator()) ReadBarrierMarkSlowPathRISCV64(
+ instruction, ref, Location::RegisterLocation(tmp));
+ AddSlowPath(slow_path);
+
+ const int32_t entry_point_offset = ReadBarrierMarkEntrypointOffset(ref);
+ // Loading the entrypoint does not require a load acquire since it is only changed when
+ // threads are suspended or running a checkpoint.
+ __ Loadd(tmp, TR, entry_point_offset);
+ __ Bnez(tmp, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
}
void CodeGeneratorRISCV64::GenerateReadBarrierSlow(HInstruction* instruction,
@@ -5837,8 +5857,6 @@
entry.code_interval.end = __ GetAdjustedPosition(entry.code_interval.end);
}
}
-
- CodeGenerator::Finalize();
}
// Generate code to invoke a runtime entry point.
diff --git a/compiler/optimizing/code_generator_riscv64.h b/compiler/optimizing/code_generator_riscv64.h
index 08df77e..375cec9 100644
--- a/compiler/optimizing/code_generator_riscv64.h
+++ b/compiler/optimizing/code_generator_riscv64.h
@@ -406,6 +406,8 @@
void GenerateMemoryBarrier(MemBarrierKind kind);
+ void ShNAdd(XRegister rd, XRegister rs1, XRegister rs2, DataType::Type type);
+
protected:
void GenerateClassInitializationCheck(SlowPathCodeRISCV64* slow_path, XRegister class_reg);
void GenerateBitstringTypeCheckCompare(HTypeCheckInstruction* check, XRegister temp);
@@ -519,8 +521,6 @@
void Load(Location out, XRegister rs1, int32_t offset, DataType::Type type);
void Store(Location value, XRegister rs1, int32_t offset, DataType::Type type);
- void ShNAdd(XRegister rd, XRegister rs1, XRegister rs2, DataType::Type type);
-
Riscv64Assembler* const assembler_;
CodeGeneratorRISCV64* const codegen_;
@@ -759,22 +759,13 @@
bool needs_null_check);
// Factored implementation, used by GenerateFieldLoadWithBakerReadBarrier
// and GenerateArrayLoadWithBakerReadBarrier.
- //
- // Load the object reference located at the address
- // `obj + offset + (index << scale_factor)`, held by object `obj`, into
- // `ref`, and mark it if needed.
- //
- // If `always_update_field` is true, the value of the reference is
- // atomically updated in the holder (`obj`).
void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
Location ref,
XRegister obj,
uint32_t offset,
Location index,
- ScaleFactor scale_factor,
Location temp,
- bool needs_null_check,
- bool always_update_field = false);
+ bool needs_null_check);
// Generate a read barrier for a heap reference within `instruction`
// using a slow path.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index cca11e0..040c244 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -751,6 +751,14 @@
case HInstruction::kFloatConstant:
case HInstruction::kIntConstant:
case HInstruction::kLongConstant:
+ case HInstruction::kNullConstant:
+ case HInstruction::kLoadClass:
+ case HInstruction::kLoadString:
+ case HInstruction::kLoadMethodHandle:
+ case HInstruction::kLoadMethodType:
+ case HInstruction::kInstanceFieldGet:
+ case HInstruction::kStaticFieldGet:
+ case HInstruction::kArrayGet:
case HInstruction::kAbove:
case HInstruction::kAboveOrEqual:
case HInstruction::kBelow:
diff --git a/compiler/utils/riscv64/assembler_riscv64.cc b/compiler/utils/riscv64/assembler_riscv64.cc
index fd714c7..089bc5d 100644
--- a/compiler/utils/riscv64/assembler_riscv64.cc
+++ b/compiler/utils/riscv64/assembler_riscv64.cc
@@ -50,6 +50,7 @@
}
void Riscv64Assembler::FinalizeCode() {
+ CHECK(!finalized_);
Assembler::FinalizeCode();
ReserveJumpTableSpace();
EmitLiterals();
@@ -57,6 +58,7 @@
EmitBranches();
EmitJumpTables();
PatchCFI();
+ finalized_ = true;
}
void Riscv64Assembler::Emit(uint32_t value) {
diff --git a/compiler/utils/riscv64/assembler_riscv64.h b/compiler/utils/riscv64/assembler_riscv64.h
index d758345..15f2518 100644
--- a/compiler/utils/riscv64/assembler_riscv64.h
+++ b/compiler/utils/riscv64/assembler_riscv64.h
@@ -163,6 +163,7 @@
const Riscv64InstructionSetFeatures* instruction_set_features = nullptr)
: Assembler(allocator),
branches_(allocator->Adapter(kArenaAllocAssembler)),
+ finalized_(false),
overwriting_(false),
overwrite_location_(0),
literals_(allocator->Adapter(kArenaAllocAssembler)),
@@ -1042,6 +1043,9 @@
ArenaVector<Branch> branches_;
+ // For checking that we finalize the code only once.
+ bool finalized_;
+
// Whether appending instructions at the end of the buffer or overwriting the existing ones.
bool overwriting_;
// The current overwrite location.
diff --git a/runtime/arch/riscv64/asm_support_riscv64.S b/runtime/arch/riscv64/asm_support_riscv64.S
index b4042d7..dd9d107 100644
--- a/runtime/arch/riscv64/asm_support_riscv64.S
+++ b/runtime/arch/riscv64/asm_support_riscv64.S
@@ -450,7 +450,7 @@
.endm
-.macro RESTORE_SAVE_EVERYTHING_FRAME
+.macro RESTORE_SAVE_EVERYTHING_FRAME load_a0 = 1
// stack slot (8*0)(sp) is for ArtMethod*
// 32 slots for FPRs
@@ -498,7 +498,9 @@
#if SAVE_EVERYTHING_FRAME_OFFSET_A0 != (8*37)
#error "unexpected SAVE_EVERYTHING_FRAME_OFFSET_A0"
#endif
+ .if \load_a0
RESTORE_GPR a0, (8*37) // x10, offset must equal SAVE_EVERYTHING_FRAME_OFFSET_A0
+ .endif
RESTORE_GPR a1, (8*38) // x11
RESTORE_GPR a2, (8*39) // x12
RESTORE_GPR a3, (8*40) // x13
diff --git a/runtime/arch/riscv64/entrypoints_init_riscv64.cc b/runtime/arch/riscv64/entrypoints_init_riscv64.cc
index 7e93df7..3bf38b0 100644
--- a/runtime/arch/riscv64/entrypoints_init_riscv64.cc
+++ b/runtime/arch/riscv64/entrypoints_init_riscv64.cc
@@ -22,11 +22,68 @@
// art_quick_read_barrier_mark_regX uses a non-standard calling convention: it
// expects its input in register X and returns its result in that same register,
// and saves and restores all other registers.
+
+// No read barrier for X0 (Zero), X1 (RA), X2 (SP), X3 (GP) and X4 (TP).
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg05(mirror::Object*); // t0/x5
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg06(mirror::Object*); // t1/x6
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg07(mirror::Object*); // t2/x7
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg08(mirror::Object*); // t3/x8
+// No read barrier for X9 (S1/xSELF).
extern "C" mirror::Object* art_quick_read_barrier_mark_reg10(mirror::Object*); // a0/x10
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg11(mirror::Object*); // a1/x11
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg12(mirror::Object*); // a2/x12
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg13(mirror::Object*); // a3/x13
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg14(mirror::Object*); // a4/x14
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg15(mirror::Object*); // a5/x15
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg16(mirror::Object*); // a6/x16
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg17(mirror::Object*); // a7/x17
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg18(mirror::Object*); // s2/x18
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg19(mirror::Object*); // s3/x19
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg20(mirror::Object*); // s4/x20
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg21(mirror::Object*); // s5/x21
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg22(mirror::Object*); // s6/x22
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg23(mirror::Object*); // s7/x23
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg24(mirror::Object*); // s8/x24
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg25(mirror::Object*); // s9/x25
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg26(mirror::Object*); // s10/x26
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg27(mirror::Object*); // s11/x27
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg28(mirror::Object*); // t3/x28
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg29(mirror::Object*); // t4/x29
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg30(mirror::Object*); // t5/x30
+extern "C" mirror::Object* art_quick_read_barrier_mark_reg31(mirror::Object*); // t6/x31
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active) {
- // TODO(riscv64): add read barrier entrypoints
+ // No read barrier for X0 (Zero), X1 (RA), X2 (SP), X3 (GP) and X4 (TP).
+ qpoints->SetReadBarrierMarkReg05(is_active ? art_quick_read_barrier_mark_reg05 : nullptr);
+ qpoints->SetReadBarrierMarkReg06(is_active ? art_quick_read_barrier_mark_reg06 : nullptr);
+ qpoints->SetReadBarrierMarkReg07(is_active ? art_quick_read_barrier_mark_reg07 : nullptr);
+ qpoints->SetReadBarrierMarkReg08(is_active ? art_quick_read_barrier_mark_reg08 : nullptr);
+ // No read barrier for X9 (S1/xSELF).
qpoints->SetReadBarrierMarkReg10(is_active ? art_quick_read_barrier_mark_reg10 : nullptr);
+ qpoints->SetReadBarrierMarkReg11(is_active ? art_quick_read_barrier_mark_reg11 : nullptr);
+ qpoints->SetReadBarrierMarkReg12(is_active ? art_quick_read_barrier_mark_reg12 : nullptr);
+ qpoints->SetReadBarrierMarkReg13(is_active ? art_quick_read_barrier_mark_reg13 : nullptr);
+ qpoints->SetReadBarrierMarkReg14(is_active ? art_quick_read_barrier_mark_reg14 : nullptr);
+ qpoints->SetReadBarrierMarkReg15(is_active ? art_quick_read_barrier_mark_reg15 : nullptr);
+ qpoints->SetReadBarrierMarkReg16(is_active ? art_quick_read_barrier_mark_reg16 : nullptr);
+ qpoints->SetReadBarrierMarkReg17(is_active ? art_quick_read_barrier_mark_reg17 : nullptr);
+ qpoints->SetReadBarrierMarkReg18(is_active ? art_quick_read_barrier_mark_reg18 : nullptr);
+ qpoints->SetReadBarrierMarkReg19(is_active ? art_quick_read_barrier_mark_reg19 : nullptr);
+ qpoints->SetReadBarrierMarkReg20(is_active ? art_quick_read_barrier_mark_reg20 : nullptr);
+ qpoints->SetReadBarrierMarkReg21(is_active ? art_quick_read_barrier_mark_reg21 : nullptr);
+ qpoints->SetReadBarrierMarkReg22(is_active ? art_quick_read_barrier_mark_reg22 : nullptr);
+ qpoints->SetReadBarrierMarkReg23(is_active ? art_quick_read_barrier_mark_reg23 : nullptr);
+ qpoints->SetReadBarrierMarkReg24(is_active ? art_quick_read_barrier_mark_reg24 : nullptr);
+ qpoints->SetReadBarrierMarkReg25(is_active ? art_quick_read_barrier_mark_reg25 : nullptr);
+ qpoints->SetReadBarrierMarkReg26(is_active ? art_quick_read_barrier_mark_reg26 : nullptr);
+ qpoints->SetReadBarrierMarkReg27(is_active ? art_quick_read_barrier_mark_reg27 : nullptr);
+ qpoints->SetReadBarrierMarkReg28(is_active ? art_quick_read_barrier_mark_reg28 : nullptr);
+ qpoints->SetReadBarrierMarkReg29(is_active ? art_quick_read_barrier_mark_reg29 : nullptr);
+ // Note: Entrypoints for registers X30 (T5) and X31 (T6) are stored in entries
+ // for X0 (Zero) and X1 (RA) because these are not valid registers for marking
+ // and we currently have slots only up to register 29.
+ qpoints->SetReadBarrierMarkReg00(is_active ? art_quick_read_barrier_mark_reg30 : nullptr);
+ qpoints->SetReadBarrierMarkReg01(is_active ? art_quick_read_barrier_mark_reg31 : nullptr);
}
void InitEntryPoints(JniEntryPoints* jpoints,
diff --git a/runtime/arch/riscv64/quick_entrypoints_riscv64.S b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
index 4b1d151..61beee9 100644
--- a/runtime/arch/riscv64/quick_entrypoints_riscv64.S
+++ b/runtime/arch/riscv64/quick_entrypoints_riscv64.S
@@ -545,6 +545,26 @@
.endm
+.macro DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_A0 temp, is_ref
+ ld \temp, THREAD_DEOPT_CHECK_REQUIRED_OFFSET(xSELF)
+ CFI_REMEMBER_STATE
+ bnez \temp, 2f
+ RESTORE_SAVE_EVERYTHING_FRAME /* load_a0= */ 0
+ ret
+2:
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+ sd a0, SAVE_EVERYTHING_FRAME_OFFSET_A0(sp) // update result in the frame
+ li a2, \is_ref // pass if result is a reference
+ mv a1, a0 // pass the result
+ mv a0, xSELF // Thread::Current
+ call artDeoptimizeIfNeeded
+ CFI_REMEMBER_STATE
+ RESTORE_SAVE_EVERYTHING_FRAME
+ ret
+ CFI_RESTORE_STATE_AND_DEF_CFA sp, FRAME_SIZE_SAVE_EVERYTHING
+.endm
+
+
// Entry from managed code that tries to lock the object in a fast path and
// calls `artLockObjectFromCode()` for the difficult cases, may block for GC.
// A0 holds the possibly null object to lock.
@@ -1033,6 +1053,36 @@
END art_quick_update_inline_cache
+.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL \
+ name, entrypoint, runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
+ .extern \entrypoint
+ENTRY \name
+ SETUP_SAVE_EVERYTHING_FRAME \runtime_method_offset // Save everything for stack crawl.
+ mv a1, xSELF // Pass Thread::Current().
+ call \entrypoint // (uint32_t/Class* index/klass, Thread* self)
+ beqz a0, 1f // If result is null, deliver the exception.
+ DEOPT_OR_RESTORE_SAVE_EVERYTHING_FRAME_AND_RETURN_A0 /* temp= */ a1, /* is_ref= */ 1
+1:
+ DELIVER_PENDING_EXCEPTION_FRAME_READY
+END \name
+.endm
+
+.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT name, entrypoint
+ ONE_ARG_SAVE_EVERYTHING_DOWNCALL \
+ \name, \entrypoint, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
+.endm
+
+
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT \
+ art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL \
+ art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
+ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode
+
+
// Create a function `name` calling the art::ReadBarrier::Mark routine, getting its argument and
// returning its result through \reg, saving and restoring all caller-save registers.
//
@@ -1165,7 +1215,34 @@
.endm
+// No read barrier for X0 (Zero), X1 (RA), X2 (SP), X3 (GP) and X4 (TP).
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, t0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, t1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, t2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, s0
+// No read barrier for X9 (S1/xSELF).
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, a0
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, a1
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, a2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, a3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, a4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg15, a5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg16, a6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, a7
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, s2
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, s3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, s4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, s5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, s6
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg23, s7
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg24, s8
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg25, s9
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg26, s10
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg27, s11
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg28, t3
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, t4
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg30, t5
+READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg31, t6
UNDEFINED art_quick_deoptimize_from_compiled_code
@@ -1317,12 +1394,6 @@
UNDEFINED art_quick_alloc_string_from_chars_region_tlab_instrumented
UNDEFINED art_quick_alloc_string_from_string_region_tlab
UNDEFINED art_quick_alloc_string_from_string_region_tlab_instrumented
-UNDEFINED art_quick_initialize_static_storage
-UNDEFINED art_quick_resolve_type_and_verify_access
-UNDEFINED art_quick_resolve_type
-UNDEFINED art_quick_resolve_method_handle
-UNDEFINED art_quick_resolve_method_type
-UNDEFINED art_quick_resolve_string
UNDEFINED art_quick_set8_instance
UNDEFINED art_quick_set8_static
UNDEFINED art_quick_set16_instance
diff --git a/test/160-read-barrier-stress/src/Main.java b/test/160-read-barrier-stress/src/Main.java
index ab23358..cea09bf 100644
--- a/test/160-read-barrier-stress/src/Main.java
+++ b/test/160-read-barrier-stress/src/Main.java
@@ -22,20 +22,20 @@
public class Main {
public static void main(String[] args) throws Exception {
- testFieldReads();
- testArrayReadsWithConstIndex();
- testArrayReadsWithNonConstIndex();
- testGcRoots();
- testUnsafeGet();
- testUnsafeCas();
- testUnsafeCasRegression();
- testVarHandleCompareAndSet();
- testVarHandleCompareAndExchange();
- testVarHandleGetAndSet();
- testReferenceRefersTo();
+ $noinline$testFieldReads();
+ $noinline$testArrayReadsWithConstIndex();
+ $noinline$testArrayReadsWithNonConstIndex();
+ $noinline$testGcRoots();
+ $noinline$testUnsafeGet();
+ $noinline$testUnsafeCas();
+ $noinline$testUnsafeCasRegression();
+ $noinline$testVarHandleCompareAndSet();
+ $noinline$testVarHandleCompareAndExchange();
+ $noinline$testVarHandleGetAndSet();
+ $noinline$testReferenceRefersTo();
}
- public static void testFieldReads() {
+ public static void $noinline$testFieldReads() {
// Initialize local variables for comparison.
Object f0000 = manyFields.testField0000;
Object f1024 = manyFields.testField1024;
@@ -46,19 +46,19 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
ManyFields mf = manyFields; // Load the volatile `manyFields` once on each iteration.
// Test reference field access.
- assertSameObject(f0000, mf.testField0000);
- assertDifferentObject(f0000, mf.testField0001);
- assertSameObject(f1024, mf.testField1024);
- assertSameObject(f4444, mf.testField4444);
- assertDifferentObject(f4999, mf.testField4998);
- assertSameObject(f4999, mf.testField4999);
+ $noinline$assertSameObject(f0000, mf.testField0000);
+ $noinline$assertDifferentObject(f0000, mf.testField0001);
+ $noinline$assertSameObject(f1024, mf.testField1024);
+ $noinline$assertSameObject(f4444, mf.testField4444);
+ $noinline$assertDifferentObject(f4999, mf.testField4998);
+ $noinline$assertSameObject(f4999, mf.testField4999);
}
}
- public static void testArrayReadsWithConstIndex() {
+ public static void $noinline$testArrayReadsWithConstIndex() {
// Initialize local variables for comparison.
Object f0000 = new Integer(0);
Object f1024 = new Integer(1024);
@@ -79,19 +79,19 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
Object[] la = largeArray; // Load the volatile `largeArray` once on each iteration.
// Test array access with constant index.
- assertSameObject(f0000, la[0]);
- assertDifferentObject(f0000, la[1]);
- assertSameObject(f1024, la[1024]);
- assertSameObject(f4444, la[4444]);
- assertDifferentObject(f4999, la[4998]);
- assertSameObject(f4999, la[4999]);
+ $noinline$assertSameObject(f0000, la[0]);
+ $noinline$assertDifferentObject(f0000, la[1]);
+ $noinline$assertSameObject(f1024, la[1024]);
+ $noinline$assertSameObject(f4444, la[4444]);
+ $noinline$assertDifferentObject(f4999, la[4998]);
+ $noinline$assertSameObject(f4999, la[4999]);
}
}
- public static void testArrayReadsWithNonConstIndex() {
+ public static void $noinline$testArrayReadsWithNonConstIndex() {
// Initialize local variables for comparison.
Object f0000 = new Integer(0);
Object f1024 = new Integer(1024);
@@ -119,15 +119,15 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
Object[] la = largeArray; // Load the volatile `largeArray` once on each iteration.
// Test array access with non-constant index.
- assertSameObject(f0000, la[i0]);
- assertDifferentObject(f0000, la[i1]);
- assertSameObject(f1024, la[i1024]);
- assertSameObject(f4444, la[i4444]);
- assertDifferentObject(f4999, la[i4998]);
- assertSameObject(f4999, la[i4999]);
+ $noinline$assertSameObject(f0000, la[i0]);
+ $noinline$assertDifferentObject(f0000, la[i1]);
+ $noinline$assertSameObject(f1024, la[i1024]);
+ $noinline$assertSameObject(f4444, la[i4444]);
+ $noinline$assertDifferentObject(f4999, la[i4998]);
+ $noinline$assertSameObject(f4999, la[i4999]);
la = largeArray;
// Group the ArrayGets so they aren't divided by a function call; this will enable
@@ -139,16 +139,16 @@
Object tmp5 = la[i0 + 4998];
Object tmp6 = la[i0 + 4999];
- assertSameObject(f0000, tmp1);
- assertDifferentObject(f0000, tmp2);
- assertSameObject(f1024, tmp3);
- assertSameObject(f4444, tmp4);
- assertDifferentObject(f4999, tmp5);
- assertSameObject(f4999, tmp6);
+ $noinline$assertSameObject(f0000, tmp1);
+ $noinline$assertDifferentObject(f0000, tmp2);
+ $noinline$assertSameObject(f1024, tmp3);
+ $noinline$assertSameObject(f4444, tmp4);
+ $noinline$assertDifferentObject(f4999, tmp5);
+ $noinline$assertSameObject(f4999, tmp6);
}
}
- public static void testGcRoots() {
+ public static void $noinline$testGcRoots() {
// Initialize strings, hide this under a condition based on a volatile field.
String testString0 = null;
String testString1 = null;
@@ -167,19 +167,19 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
// Test GC roots.
if (index0 != 12345678) {
- assertSameObject(testString0, "testString0");
- assertSameObject(testString1, "testString1");
- assertSameObject(testString2, "testString2");
- assertSameObject(testString3, "testString3");
+ $noinline$assertSameObject(testString0, "testString0");
+ $noinline$assertSameObject(testString1, "testString1");
+ $noinline$assertSameObject(testString2, "testString2");
+ $noinline$assertSameObject(testString3, "testString3");
}
// TODO: Stress GC roots (const-class, kBssEntry/kReferrersClass).
}
}
- public static void testUnsafeGet() throws Exception {
+ public static void $noinline$testUnsafeGet() throws Exception {
// Initialize local variables for comparison.
Object f0000 = manyFields.testField0000;
Object f1024 = manyFields.testField1024;
@@ -204,19 +204,19 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
ManyFields mf = manyFields; // Load the volatile `manyFields` once on each iteration.
// Test Unsafe.getObject().
- assertSameObject(f0000, unsafe.getObject(mf, f0000Offset));
- assertDifferentObject(f0000, unsafe.getObject(mf, f0001Offset));
- assertSameObject(f1024, unsafe.getObject(mf, f1024Offset));
- assertSameObject(f4444, unsafe.getObject(mf, f4444Offset));
- assertDifferentObject(f4999, unsafe.getObject(mf, f4998Offset));
- assertSameObject(f4999, unsafe.getObject(mf, f4999Offset));
+ $noinline$assertSameObject(f0000, unsafe.getObject(mf, f0000Offset));
+ $noinline$assertDifferentObject(f0000, unsafe.getObject(mf, f0001Offset));
+ $noinline$assertSameObject(f1024, unsafe.getObject(mf, f1024Offset));
+ $noinline$assertSameObject(f4444, unsafe.getObject(mf, f4444Offset));
+ $noinline$assertDifferentObject(f4999, unsafe.getObject(mf, f4998Offset));
+ $noinline$assertSameObject(f4999, unsafe.getObject(mf, f4999Offset));
}
}
- public static void testUnsafeCas() throws Exception {
+ public static void $noinline$testUnsafeCas() throws Exception {
// Initialize local variables for comparison.
Object f0000 = manyFields.testField0000;
Object f1024 = manyFields.testField1024;
@@ -241,21 +241,29 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
ManyFields mf = manyFields; // Load the volatile `manyFields` once on each iteration.
// Test Unsafe.compareAndSwapObject().
- assertEqual(false, unsafe.compareAndSwapObject(mf, f0000Offset, f1024, f4444));
- assertEqual(false, unsafe.compareAndSwapObject(mf, f0001Offset, f1024, f4444));
- assertEqual(true, unsafe.compareAndSwapObject(mf, f1024Offset, f1024, f4444));
- assertEqual(true, unsafe.compareAndSwapObject(mf, f1024Offset, f4444, f1024));
- assertEqual(false, unsafe.compareAndSwapObject(mf, f1024Offset, f4444, f1024));
- assertEqual(false, unsafe.compareAndSwapObject(mf, f4444Offset, f1024, f4444));
- assertEqual(false, unsafe.compareAndSwapObject(mf, f4998Offset, f1024, f4444));
- assertEqual(false, unsafe.compareAndSwapObject(mf, f4999Offset, f1024, f4444));
+ $noinline$assertEqual(
+ false, unsafe.compareAndSwapObject(mf, f0000Offset, f1024, f4444));
+ $noinline$assertEqual(
+ false, unsafe.compareAndSwapObject(mf, f0001Offset, f1024, f4444));
+ $noinline$assertEqual(
+ true, unsafe.compareAndSwapObject(mf, f1024Offset, f1024, f4444));
+ $noinline$assertEqual(
+ true, unsafe.compareAndSwapObject(mf, f1024Offset, f4444, f1024));
+ $noinline$assertEqual(
+ false, unsafe.compareAndSwapObject(mf, f1024Offset, f4444, f1024));
+ $noinline$assertEqual(
+ false, unsafe.compareAndSwapObject(mf, f4444Offset, f1024, f4444));
+ $noinline$assertEqual(
+ false, unsafe.compareAndSwapObject(mf, f4998Offset, f1024, f4444));
+ $noinline$assertEqual(
+ false, unsafe.compareAndSwapObject(mf, f4999Offset, f1024, f4444));
}
}
- public static void testUnsafeCasRegression() throws Exception {
+ public static void $noinline$testUnsafeCasRegression() throws Exception {
// Initialize local variables for comparison.
Object f0000 = manyFields.testField0000;
// Initialize Unsafe.
@@ -267,7 +275,7 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
ManyFields mf = manyFields; // Load the volatile `manyFields` once on each iteration.
// With https://android-review.googlesource.com/729224 , the intrinsic could
@@ -286,7 +294,7 @@
}
}
- public static void testVarHandleCompareAndSet() throws Exception {
+ public static void $noinline$testVarHandleCompareAndSet() throws Exception {
// Initialize local variables for comparison.
Object f0000 = manyFields.testField0000;
Object f1024 = manyFields.testField1024;
@@ -310,21 +318,21 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
ManyFields mf = manyFields; // Load the volatile `manyFields` once on each iteration.
// Test VarHandle.compareAndSet().
- assertEqual(false, f0000vh.compareAndSet(mf, f1024, f4444));
- assertEqual(false, f0001vh.compareAndSet(mf, f1024, f4444));
- assertEqual(true, f1024vh.compareAndSet(mf, f1024, f4444));
- assertEqual(true, f1024vh.compareAndSet(mf, f4444, f1024));
- assertEqual(false, f1024vh.compareAndSet(mf, f4444, f1024));
- assertEqual(false, f4444vh.compareAndSet(mf, f1024, f4444));
- assertEqual(false, f4998vh.compareAndSet(mf, f1024, f4444));
- assertEqual(false, f4999vh.compareAndSet(mf, f1024, f4444));
+ $noinline$assertEqual(false, f0000vh.compareAndSet(mf, f1024, f4444));
+ $noinline$assertEqual(false, f0001vh.compareAndSet(mf, f1024, f4444));
+ $noinline$assertEqual(true, f1024vh.compareAndSet(mf, f1024, f4444));
+ $noinline$assertEqual(true, f1024vh.compareAndSet(mf, f4444, f1024));
+ $noinline$assertEqual(false, f1024vh.compareAndSet(mf, f4444, f1024));
+ $noinline$assertEqual(false, f4444vh.compareAndSet(mf, f1024, f4444));
+ $noinline$assertEqual(false, f4998vh.compareAndSet(mf, f1024, f4444));
+ $noinline$assertEqual(false, f4999vh.compareAndSet(mf, f1024, f4444));
}
}
- public static void testVarHandleCompareAndExchange() throws Exception {
+ public static void $noinline$testVarHandleCompareAndExchange() throws Exception {
// Initialize local variables for comparison.
Object f0000 = manyFields.testField0000;
Object f0001 = manyFields.testField0001;
@@ -350,21 +358,29 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
ManyFields mf = manyFields; // Load the volatile `manyFields` once on each iteration.
// Test VarHandle.compareAndExchange(). Use reference comparison, not equals().
- assertSameObject(f0000, f0000vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
- assertSameObject(f0001, f0001vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
- assertSameObject(f1024, f1024vh.compareAndExchange(mf, f1024, f4444)); // Replaced.
- assertSameObject(f4444, f1024vh.compareAndExchange(mf, f4444, f1024)); // Replaced.
- assertSameObject(f1024, f1024vh.compareAndExchange(mf, f4444, f1024)); // Unchanged.
- assertSameObject(f4444, f4444vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
- assertSameObject(f4998, f4998vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
- assertSameObject(f4999, f4999vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
+ $noinline$assertSameObject(
+ f0000, f0000vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
+ $noinline$assertSameObject(
+ f0001, f0001vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
+ $noinline$assertSameObject(
+ f1024, f1024vh.compareAndExchange(mf, f1024, f4444)); // Replaced.
+ $noinline$assertSameObject(
+ f4444, f1024vh.compareAndExchange(mf, f4444, f1024)); // Replaced.
+ $noinline$assertSameObject(
+ f1024, f1024vh.compareAndExchange(mf, f4444, f1024)); // Unchanged.
+ $noinline$assertSameObject(
+ f4444, f4444vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
+ $noinline$assertSameObject(
+ f4998, f4998vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
+ $noinline$assertSameObject(
+ f4999, f4999vh.compareAndExchange(mf, f1024, f4444)); // Unchanged.
}
}
- public static void testVarHandleGetAndSet() throws Exception {
+ public static void $noinline$testVarHandleGetAndSet() throws Exception {
// Initialize local variables for comparison.
Object f0000 = manyFields.testField0000;
Object f0001 = manyFields.testField0001;
@@ -390,21 +406,21 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and
// stress the read barrier implementation if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
ManyFields mf = manyFields; // Load the volatile `manyFields` once on each iteration.
// Test VarHandle.getAndSet(). Use reference comparison, not equals().
- assertSameObject(f0000, f0000vh.getAndSet(mf, f0000)); // Unchanged.
- assertSameObject(f0001, f0001vh.getAndSet(mf, f0001)); // Unchanged.
- assertSameObject(f1024, f1024vh.getAndSet(mf, f4444)); // Replaced.
- assertSameObject(f4444, f1024vh.getAndSet(mf, f1024)); // Replaced.
- assertSameObject(f1024, f1024vh.getAndSet(mf, f1024)); // Unchanged.
- assertSameObject(f4444, f4444vh.getAndSet(mf, f4444)); // Unchanged.
- assertSameObject(f4998, f4998vh.getAndSet(mf, f4998)); // Unchanged.
- assertSameObject(f4999, f4999vh.getAndSet(mf, f4999)); // Unchanged.
+ $noinline$assertSameObject(f0000, f0000vh.getAndSet(mf, f0000)); // Unchanged.
+ $noinline$assertSameObject(f0001, f0001vh.getAndSet(mf, f0001)); // Unchanged.
+ $noinline$assertSameObject(f1024, f1024vh.getAndSet(mf, f4444)); // Replaced.
+ $noinline$assertSameObject(f4444, f1024vh.getAndSet(mf, f1024)); // Replaced.
+ $noinline$assertSameObject(f1024, f1024vh.getAndSet(mf, f1024)); // Unchanged.
+ $noinline$assertSameObject(f4444, f4444vh.getAndSet(mf, f4444)); // Unchanged.
+ $noinline$assertSameObject(f4998, f4998vh.getAndSet(mf, f4998)); // Unchanged.
+ $noinline$assertSameObject(f4999, f4999vh.getAndSet(mf, f4999)); // Unchanged.
}
}
- public static void testReferenceRefersTo() throws Exception {
+ public static void $noinline$testReferenceRefersTo() throws Exception {
// Initialize local variables for comparison.
manyFields.testField0000 = new Object();
manyFields.testField1024 = new Object();
@@ -419,33 +435,33 @@
// over 64MiB memory (with heap size limited to 16MiB), ensuring we run GC and stress the
// read barrier implementation in Reference.refersTo() if concurrent collector is enabled.
for (int i = 0; i != 64 * 1024; ++i) {
- allocateAtLeast1KiB();
+ $noinline$allocateAtLeast1KiB();
ManyFields mf = manyFields; // Load the volatile `manyFields` once on each iteration.
// Test Reference.refersTo() with reference field access.
- assertEqual(true, f0000.refersTo(mf.testField0000));
- assertEqual(false, f0000.refersTo(mf.testField0001));
- assertEqual(true, f1024.refersTo(mf.testField1024));
- assertEqual(true, f4444.refersTo(mf.testField4444));
- assertEqual(false, f4999.refersTo(mf.testField4998));
- assertEqual(true, f4999.refersTo(mf.testField4999));
+ $noinline$assertEqual(true, f0000.refersTo(mf.testField0000));
+ $noinline$assertEqual(false, f0000.refersTo(mf.testField0001));
+ $noinline$assertEqual(true, f1024.refersTo(mf.testField1024));
+ $noinline$assertEqual(true, f4444.refersTo(mf.testField4444));
+ $noinline$assertEqual(false, f4999.refersTo(mf.testField4998));
+ $noinline$assertEqual(true, f4999.refersTo(mf.testField4999));
}
}
public static int $noinline$foo() { return 42; }
- public static void assertDifferentObject(Object lhs, Object rhs) {
+ public static void $noinline$assertDifferentObject(Object lhs, Object rhs) {
if (lhs == rhs) {
throw new Error("Same objects: " + lhs + " and " + rhs);
}
}
- public static void assertSameObject(Object lhs, Object rhs) {
+ public static void $noinline$assertSameObject(Object lhs, Object rhs) {
if (lhs != rhs) {
throw new Error("Different objects: " + lhs + " and " + rhs);
}
}
- public static void assertEqual(boolean expected, boolean actual) {
+ public static void $noinline$assertEqual(boolean expected, boolean actual) {
if (expected != actual) {
throw new Error("Expected " + expected +", got " + actual);
}
@@ -458,7 +474,7 @@
return (Unsafe) f.get(null);
}
- public static void allocateAtLeast1KiB() {
+ public static void $noinline$allocateAtLeast1KiB() {
// Give GC more work by allocating Object arrays.
memory[allocationIndex] = new Object[1024 / 4];
++allocationIndex;