Small cleanup in art::x86_64::ReadBarrierMarkSlowPathX86_64.

Clearly separate art::x86_64::CpuRegister (the register object
used by the x86-64 assembler) from art::x86_64::Register (the
register number) in
art::x86_64::ReadBarrierMarkSlowPathX86_64::EmitNativeCode.
Test: ART_USE_READ_BARRIER=true make test-art-host
Bug: 12687968
Change-Id: I4300e9b9c16b18119d4e399092aa1c9543518ab5
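For context, art::x86_64::Register is a plain enumeration of x86-64
register numbers, while art::x86_64::CpuRegister is the object the
x86-64 assembler actually consumes. Below is a minimal sketch of how
the two types relate; the declarations are simplified assumptions,
not the verbatim ART headers (the real ones carry more members, such
as REX-encoding helpers):

// Sketch only; assumed shapes, not the actual ART declarations.
namespace art {
namespace x86_64 {

// Register: a bare enum of register *numbers*.
enum Register {
  RAX = 0, RCX = 1, RDX = 2, RBX = 3, RSP = 4, RBP = 5, RSI = 6, RDI = 7,
  R8 = 8, R9 = 9, R10 = 10, R11 = 11, R12 = 12, R13 = 13, R14 = 14, R15 = 15,
};

// CpuRegister: the register *object* the assembler operates on.
// It wraps a Register number and hands it back via AsRegister().
class CpuRegister {
 public:
  explicit constexpr CpuRegister(Register r) : reg_(r) {}
  constexpr Register AsRegister() const { return reg_; }
 private:
  const Register reg_;
};

}  // namespace x86_64
}  // namespace art

With that shape, obj_.AsRegister<CpuRegister>() fetches the
assembler-facing object once, and cpu_reg.AsRegister() recovers the
plain register number needed by checks such as ContainsCoreRegister(),
which is exactly the separation the diff below introduces.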
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 665d028..5cabc8f 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -457,7 +457,8 @@
 
   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
-    Register reg = obj_.AsRegister<Register>();
+    CpuRegister cpu_reg = obj_.AsRegister<CpuRegister>();
+    Register reg = cpu_reg.AsRegister();
     DCHECK(locations->CanCall());
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg));
     DCHECK(instruction_->IsInstanceFieldGet() ||
@@ -476,7 +477,7 @@
     __ Bind(GetEntryLabel());
     if (unpoison_) {
       // Object* ref = ref_addr->AsMirrorPtr()
-      __ MaybeUnpoisonHeapReference(obj_.AsRegister<CpuRegister>());
+      __ MaybeUnpoisonHeapReference(cpu_reg);
     }
     // No need to save live registers; it's taken care of by the
     // entrypoint. Also, there is no need to update the stack mask,