// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <limits.h> // For LONG_MIN, LONG_MAX.
#include "v8.h"
#if V8_TARGET_ARCH_MIPS
#include "bootstrapper.h"
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
#include "isolate-inl.h"
#include "runtime.h"
namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
: Assembler(arg_isolate, buffer, size),
generating_stub_(false),
has_frame_(false) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
}
}
void MacroAssembler::Load(Register dst,
const MemOperand& src,
Representation r) {
ASSERT(!r.IsDouble());
if (r.IsInteger8()) {
lb(dst, src);
} else if (r.IsUInteger8()) {
lbu(dst, src);
} else if (r.IsInteger16()) {
lh(dst, src);
} else if (r.IsUInteger16()) {
lhu(dst, src);
} else {
lw(dst, src);
}
}
void MacroAssembler::Store(Register src,
const MemOperand& dst,
Representation r) {
ASSERT(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
sb(src, dst);
} else if (r.IsInteger16() || r.IsUInteger16()) {
sh(src, dst);
} else {
sw(src, dst);
}
}
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond,
Register src1, const Operand& src2) {
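// Skip the load below when the condition does not hold; the branch offset
// of 2 jumps over the delay-slot nop and the lw.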
Branch(2, NegateCondition(cond), src1, src2);
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index) {
sw(source, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond,
Register src1, const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
sw(source, MemOperand(s6, index << kPointerSizeLog2));
}
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
ASSERT(num_unsaved >= 0);
if (num_unsaved > 0) {
Subu(sp, sp, Operand(num_unsaved * kPointerSize));
}
MultiPush(kSafepointSavedRegisters);
}
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
MultiPop(kSafepointSavedRegisters);
if (num_unsaved > 0) {
Addu(sp, sp, Operand(num_unsaved * kPointerSize));
}
}
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i += 2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i += 2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
ldc1(reg, MemOperand(sp, i * kDoubleSize));
}
Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
PopSafepointRegisters();
}
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
Register dst) {
sw(src, SafepointRegistersAndDoublesSlot(dst));
}
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
sw(src, SafepointRegisterSlot(dst));
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
lw(dst, SafepointRegisterSlot(src));
}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that the lowest encodings are closest to the stack pointer.
return kSafepointRegisterStackIndexMap[reg_code];
}
MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cc,
Label* branch) {
ASSERT(cc == eq || cc == ne);
And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
Branch(branch, cc, scratch,
Operand(ExternalReference::new_space_start(isolate())));
}
void MacroAssembler::RecordWriteField(
Register object,
int offset,
Register value,
Register dst,
RAStatus ra_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
ASSERT(!AreAliased(value, dst, t8, object));
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
// Skip barrier if writing a smi.
if (smi_check == INLINE_SMI_CHECK) {
JumpIfSmi(value, &done);
}
// Although the object register is tagged, the offset is relative to the start
// of the object, so the offset must be a multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize));
Addu(dst, object, Operand(offset - kHeapObjectTag));
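// dst now holds the untagged address of the field being written.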
if (emit_debug_code()) {
Label ok;
And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
}
RecordWrite(object,
dst,
value,
ra_status,
save_fp,
remembered_set_action,
OMIT_SMI_CHECK);
bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
}
}
// Will clobber the 'address' and 'value' registers. The register 'object'
// contains a heap object pointer. The heap object tag is shifted away.
void MacroAssembler::RecordWrite(Register object,
Register address,
Register value,
RAStatus ra_status,
SaveFPRegsMode fp_mode,
RememberedSetAction remembered_set_action,
SmiCheck smi_check) {
ASSERT(!AreAliased(object, address, value, t8));
ASSERT(!AreAliased(object, address, value, t9));
if (emit_debug_code()) {
lw(at, MemOperand(address));
Assert(
eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
// TODO(mstarzinger): Dynamic counter missing.
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
ASSERT_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
CheckPageFlag(value,
value, // Used as scratch.
MemoryChunk::kPointersToHereAreInterestingMask,
eq,
&done);
CheckPageFlag(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
eq,
&done);
// Record the actual write.
if (ra_status == kRAHasNotBeenSaved) {
push(ra);
}
RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
CallStub(&stub);
if (ra_status == kRAHasNotBeenSaved) {
pop(ra);
}
bind(&done);
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
}
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address,
Register scratch,
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
Label done;
if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok);
stop("Remembered set pointer is in new space");
bind(&ok);
}
// Load store buffer top.
ExternalReference store_buffer =
ExternalReference::store_buffer_top(isolate());
li(t8, Operand(store_buffer));
lw(scratch, MemOperand(t8));
// Store pointer to buffer and increment buffer top.
sw(address, MemOperand(scratch));
Addu(scratch, scratch, kPointerSize);
// Write back new top of buffer.
sw(scratch, MemOperand(t8));
// Check for end of buffer; the stub below is only called when the buffer
// overflows.
And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kFallThroughAtEnd) {
Branch(&done, eq, t8, Operand(zero_reg));
} else {
ASSERT(and_then == kReturnAtEnd);
Ret(eq, t8, Operand(zero_reg));
}
push(ra);
StoreBufferOverflowStub store_buffer_overflow =
StoreBufferOverflowStub(fp_mode);
CallStub(&store_buffer_overflow);
pop(ra);
bind(&done);
if (and_then == kReturnAtEnd) {
Ret();
}
}
// -----------------------------------------------------------------------------
// Allocation support.
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
Label same_contexts;
ASSERT(!holder_reg.is(scratch));
ASSERT(!holder_reg.is(at));
ASSERT(!scratch.is(at));
// Load current lexical context from the stack frame.
lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
scratch, Operand(zero_reg));
#endif
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
lw(scratch, FieldMemOperand(scratch, offset));
lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
push(holder_reg); // Temporarily save holder on the stack.
// Read the first word and compare to the native_context_map.
lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
LoadRoot(at, Heap::kNativeContextMapRootIndex);
Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
holder_reg, Operand(at));
pop(holder_reg); // Restore holder.
}
// Check if both contexts are the same.
lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
Branch(&same_contexts, eq, scratch, Operand(at));
// Check the context is a native context.
if (emit_debug_code()) {
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, at); // Move at to its holding place.
LoadRoot(at, Heap::kNullValueRootIndex);
Check(ne, kJSGlobalProxyContextShouldNotBeNull,
holder_reg, Operand(at));
lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
LoadRoot(at, Heap::kNativeContextMapRootIndex);
Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
holder_reg, Operand(at));
// Restoring at is not needed; at is reloaded below.
pop(holder_reg); // Restore holder.
// Restore at to holder's context.
lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
}
// Check that the security token in the calling global object is
// compatible with the security token in the receiving global
// object.
int token_offset = Context::kHeaderSize +
Context::SECURITY_TOKEN_INDEX * kPointerSize;
lw(scratch, FieldMemOperand(scratch, token_offset));
lw(at, FieldMemOperand(at, token_offset));
Branch(miss, ne, scratch, Operand(at));
bind(&same_contexts);
}
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
// First of all we assign the hash seed to scratch.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
SmiUntag(scratch);
// Xor original key with a seed.
xor_(reg0, reg0, scratch);
// Compute the hash code from the untagged key. This must be kept in sync
// with ComputeIntegerHash in utils.h.
//
// hash = ~hash + (hash << 15);
nor(scratch, reg0, zero_reg);
sll(at, reg0, 15);
addu(reg0, scratch, at);
// hash = hash ^ (hash >> 12);
srl(at, reg0, 12);
xor_(reg0, reg0, at);
// hash = hash + (hash << 2);
sll(at, reg0, 2);
addu(reg0, reg0, at);
// hash = hash ^ (hash >> 4);
srl(at, reg0, 4);
xor_(reg0, reg0, at);
// hash = hash * 2057;
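// 2057 = 2048 + 8 + 1, so hash * 2057 = (hash << 11) + (hash << 3) + hash.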
sll(scratch, reg0, 11);
sll(at, reg0, 3);
addu(reg0, reg0, at);
addu(reg0, reg0, scratch);
// hash = hash ^ (hash >> 16);
srl(at, reg0, 16);
xor_(reg0, reg0, at);
}
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
Register elements,
Register key,
Register result,
Register reg0,
Register reg1,
Register reg2) {
// Register use:
//
// elements - holds the slow-case elements of the receiver on entry.
// Unchanged unless 'result' is the same register.
//
// key - holds the smi key on entry.
// Unchanged unless 'result' is the same register.
//
// result - holds the result on exit if the load succeeded.
// Allowed to be the same as 'elements' or 'key'.
// Unchanged on bailout so 'elements' or 'key' can be used
// in further computation.
//
// Scratch registers:
//
// reg0 - holds the untagged key on entry and holds the hash once computed.
//
// reg1 - Used to hold the capacity mask of the dictionary.
//
// reg2 - Used for the index into the dictionary.
// at - Temporary (avoid MacroAssembler instructions also using 'at').
Label done;
GetNumberHash(reg0, reg1);
// Compute the capacity mask.
lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
sra(reg1, reg1, kSmiTagSize);
Subu(reg1, reg1, Operand(1));
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kNumberDictionaryProbes; i++) {
// Use reg2 for index calculations and keep the hash intact in reg0.
mov(reg2, reg0);
// Compute the masked index: (hash + i + i * i) & mask.
if (i > 0) {
Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
}
and_(reg2, reg2, reg1);
// Scale the index by multiplying by the element size.
ASSERT(SeededNumberDictionary::kEntrySize == 3);
sll(at, reg2, 1); // 2x.
addu(reg2, reg2, at); // reg2 = reg2 * 3.
// Check if the key is identical to the name.
sll(at, reg2, kPointerSizeLog2);
addu(reg2, elements, at);
lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
if (i != kNumberDictionaryProbes - 1) {
Branch(&done, eq, key, Operand(at));
} else {
Branch(miss, ne, key, Operand(at));
}
}
bind(&done);
// Check that the value is a normal property.
// reg2: elements + (index * kPointerSize).
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
Branch(miss, ne, at, Operand(zero_reg));
// Get the value at the masked, scaled index and return.
const int kValueOffset =
SeededNumberDictionary::kElementsStartOffset + kPointerSize;
lw(result, FieldMemOperand(reg2, kValueOffset));
}
// ---------------------------------------------------------------------------
// Instruction macros.
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
addu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
addiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
addu(rd, rs, at);
}
}
}
void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
subu(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
subu(rd, rs, at);
}
}
}
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
if (kArchVariant == kLoongson) {
mult(rs, rt.rm());
mflo(rd);
} else {
mul(rd, rs, rt.rm());
}
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
if (kArchVariant == kLoongson) {
mult(rs, at);
mflo(rd);
} else {
mul(rd, rs, at);
}
}
}
void MacroAssembler::Mult(Register rs, const Operand& rt) {
if (rt.is_reg()) {
mult(rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
mult(rs, at);
}
}
void MacroAssembler::Multu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
multu(rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
multu(rs, at);
}
}
void MacroAssembler::Div(Register rs, const Operand& rt) {
if (rt.is_reg()) {
div(rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
div(rs, at);
}
}
void MacroAssembler::Divu(Register rs, const Operand& rt) {
if (rt.is_reg()) {
divu(rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
divu(rs, at);
}
}
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
andi(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
and_(rd, rs, at);
}
}
}
void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
ori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
or_(rd, rs, at);
}
}
}
void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
xori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
xor_(rd, rs, at);
}
}
}
void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
nor(rd, rs, rt.rm());
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
nor(rd, rs, at);
}
}
void MacroAssembler::Neg(Register rs, const Operand& rt) {
ASSERT(rt.is_reg());
ASSERT(!at.is(rs));
ASSERT(!at.is(rt.rm()));
li(at, -1);
xor_(rs, rt.rm(), at);
}
void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
slti(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
slt(rd, rs, at);
}
}
}
void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
sltiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
sltu(rd, rs, at);
}
}
}
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
if (kArchVariant == kMips32r2) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
rotr(rd, rs, rt.imm32_);
}
} else {
if (rt.is_reg()) {
subu(at, zero_reg, rt.rm());
sllv(at, rs, at);
srlv(rd, rs, rt.rm());
or_(rd, rd, at);
} else {
if (rt.imm32_ == 0) {
srl(rd, rs, 0);
} else {
srl(at, rs, rt.imm32_);
sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
or_(rd, rd, at);
}
}
}
}
//------------Pseudo-instructions-------------
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
AllowDeferredHandleDereference smi_check;
if (value->IsSmi()) {
li(dst, Operand(value), mode);
} else {
ASSERT(value->IsHeapObject());
if (isolate()->heap()->InNewSpace(*value)) {
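// Objects in new space may be moved by the GC, so embed the value
// indirectly through a cell; the GC updates the cell's slot.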
Handle<Cell> cell = isolate()->factory()->NewCell(value);
li(dst, Operand(cell));
lw(dst, FieldMemOperand(dst, Cell::kValueOffset));
} else {
li(dst, Operand(value));
}
}
}
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
ASSERT(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
// Normal load of an immediate value which does not need Relocation Info.
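// e.g. 0x00000042 fits addiu, 0x00008000 fits ori, 0x12340000 fits lui,
// and 0x12345678 needs the two-instruction lui/ori pair below.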
if (is_int16(j.imm32_)) {
addiu(rd, zero_reg, j.imm32_);
} else if (!(j.imm32_ & kHiMask)) {
ori(rd, zero_reg, j.imm32_);
} else if (!(j.imm32_ & kImm16Mask)) {
lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
} else {
lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
ori(rd, rd, (j.imm32_ & kImm16Mask));
}
} else {
if (MustUseReg(j.rmode_)) {
RecordRelocInfo(j.rmode_, j.imm32_);
}
// We always need the same number of instructions as we may need to patch
// this code to load another value which may need 2 instructions to load.
lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
ori(rd, rd, (j.imm32_ & kImm16Mask));
}
}
void MacroAssembler::MultiPush(RegList regs) {
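// Registers are stored in descending code order, so the lowest-numbered
// register in 'regs' ends up closest to the stack pointer.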
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kPointerSize;
Subu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
sw(ToRegister(i), MemOperand(sp, stack_offset));
}
}
}
void MacroAssembler::MultiPushReversed(RegList regs) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kPointerSize;
Subu(sp, sp, Operand(stack_offset));
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
sw(ToRegister(i), MemOperand(sp, stack_offset));
}
}
}
void MacroAssembler::MultiPop(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
addiu(sp, sp, stack_offset);
}
void MacroAssembler::MultiPopReversed(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
lw(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
addiu(sp, sp, stack_offset);
}
void MacroAssembler::MultiPushFPU(RegList regs) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
Subu(sp, sp, Operand(stack_offset));
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
void MacroAssembler::MultiPushReversedFPU(RegList regs) {
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
Subu(sp, sp, Operand(stack_offset));
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
void MacroAssembler::MultiPopFPU(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
}
addiu(sp, sp, stack_offset);
}
void MacroAssembler::MultiPopReversedFPU(RegList regs) {
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
}
addiu(sp, sp, stack_offset);
}
void MacroAssembler::FlushICache(Register address, unsigned instructions) {
RegList saved_regs = kJSCallerSaved | ra.bit();
MultiPush(saved_regs);
AllowExternalCallThatCantCauseGC scope(this);
// Save to a0 in case address == t0.
Move(a0, address);
PrepareCallCFunction(2, t0);
li(a1, instructions * kInstrSize);
CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
MultiPop(saved_regs);
}
void MacroAssembler::Ext(Register rt,
Register rs,
uint16_t pos,
uint16_t size) {
ASSERT(pos < 32);
ASSERT(pos + size < 33);
if (kArchVariant == kMips32r2) {
ext_(rt, rs, pos, size);
} else {
// Move rs to rt and shift it left then right to get the
// desired bitfield on the right side and zeroes on the left.
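// e.g. Ext(rt, rs, 4, 8): sll by 20 discards bits 31..12, then srl by 24
// leaves bits 11..4 of rs in the low 8 bits of rt.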
int shift_left = 32 - (pos + size);
sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
int shift_right = 32 - size;
if (shift_right > 0) {
srl(rt, rt, shift_right);
}
}
}
void MacroAssembler::Ins(Register rt,
Register rs,
uint16_t pos,
uint16_t size) {
ASSERT(pos < 32);
ASSERT(pos + size <= 32);
ASSERT(size != 0);
if (kArchVariant == kMips32r2) {
ins_(rt, rs, pos, size);
} else {
ASSERT(!rt.is(t8) && !rs.is(t8));
Subu(at, zero_reg, Operand(1));
srl(at, at, 32 - size);
and_(t8, rs, at);
sll(t8, t8, pos);
sll(at, at, pos);
nor(at, at, zero_reg);
and_(at, rt, at);
or_(rt, t8, at);
}
}
void MacroAssembler::Cvt_d_uw(FPURegister fd,
FPURegister fs,
FPURegister scratch) {
// Move the data from fs to t8.
mfc1(t8, fs);
Cvt_d_uw(fd, t8, scratch);
}
void MacroAssembler::Cvt_d_uw(FPURegister fd,
Register rs,
FPURegister scratch) {
// Convert rs to a FP value in fd (and fd + 1).
// We do this by converting rs minus the MSB to avoid sign conversion,
// then adding 2^31 to the result (if needed).
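// That is: fd = (double)(rs & 0x7FFFFFFF) + ((rs >> 31) ? 2147483648.0 : 0.0).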
ASSERT(!fd.is(scratch));
ASSERT(!rs.is(t9));
ASSERT(!rs.is(at));
// Save rs's MSB to t9.
Ext(t9, rs, 31, 1);
// Remove rs's MSB.
Ext(at, rs, 0, 31);
// Move the result to fd.
mtc1(at, fd);
// Convert fd to a real FP value.
cvt_d_w(fd, fd);
Label conversion_done;
// If rs's MSB was 0, it's done.
// Otherwise we need to add that to the FP register.
Branch(&conversion_done, eq, t9, Operand(zero_reg));
// Load 2^31 into scratch as its double representation.
li(at, 0x41E00000);
mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
// Add it to fd.
add_d(fd, fd, scratch);
bind(&conversion_done);
}
void MacroAssembler::Trunc_uw_d(FPURegister fd,
FPURegister fs,
FPURegister scratch) {
Trunc_uw_d(fs, t8, scratch);
mtc1(t8, fd);
}
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kLoongson && fd.is(fs)) {
mfc1(t8, FPURegister::from_code(fs.code() + 1));
trunc_w_d(fd, fs);
mtc1(t8, FPURegister::from_code(fs.code() + 1));
} else {
trunc_w_d(fd, fs);
}
}
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kLoongson && fd.is(fs)) {
mfc1(t8, FPURegister::from_code(fs.code() + 1));
round_w_d(fd, fs);
mtc1(t8, FPURegister::from_code(fs.code() + 1));
} else {
round_w_d(fd, fs);
}
}
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kLoongson && fd.is(fs)) {
mfc1(t8, FPURegister::from_code(fs.code() + 1));
floor_w_d(fd, fs);
mtc1(t8, FPURegister::from_code(fs.code() + 1));
} else {
floor_w_d(fd, fs);
}
}
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
if (kArchVariant == kLoongson && fd.is(fs)) {
mfc1(t8, FPURegister::from_code(fs.code() + 1));
ceil_w_d(fd, fs);
mtc1(t8, FPURegister::from_code(fs.code() + 1));
} else {
ceil_w_d(fd, fs);
}
}
void MacroAssembler::Trunc_uw_d(FPURegister fd,
Register rs,
FPURegister scratch) {
ASSERT(!fd.is(scratch));
ASSERT(!rs.is(at));
// Load 2^31 into scratch as its double representation.
li(at, 0x41E00000);
mtc1(at, FPURegister::from_code(scratch.code() + 1));
mtc1(zero_reg, scratch);
// Test if scratch > fd.
// If fd < 2^31 we can convert it normally.
Label simple_convert;
BranchF(&simple_convert, NULL, lt, fd, scratch);
// First we subtract 2^31 from fd, then trunc it to rs
// and add 2^31 to rs.
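// That is: rs = (uint32_t)trunc(fd - 2^31) | 0x80000000, which is correct
// for inputs in [2^31, 2^32).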
sub_d(scratch, fd, scratch);
trunc_w_d(scratch, scratch);
mfc1(rs, scratch);
Or(rs, rs, 1 << 31);
Label done;
Branch(&done);
// Simple conversion.
bind(&simple_convert);
trunc_w_d(scratch, fd);
mfc1(rs, scratch);
bind(&done);
}
void MacroAssembler::BranchF(Label* target,
Label* nan,
Condition cc,
FPURegister cmp1,
FPURegister cmp2,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cc == al) {
Branch(bd, target);
return;
}
ASSERT(nan || target);
// Check for unordered (NaN) cases.
if (nan) {
c(UN, D, cmp1, cmp2);
bc1t(nan);
}
if (target) {
// Here NaN cases were either handled by this function or are assumed to
// have been handled by the caller.
// Unsigned conditions are treated as their signed counterpart.
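// Each case emits a FP compare (c.cond.d) that sets the FCSR condition
// bit, then branches on it with bc1t/bc1f; conditions without a direct
// compare encoding use the complementary compare and bc1f.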
switch (cc) {
case lt:
c(OLT, D, cmp1, cmp2);
bc1t(target);
break;
case gt:
c(ULE, D, cmp1, cmp2);
bc1f(target);
break;
case ge:
c(ULT, D, cmp1, cmp2);
bc1f(target);
break;
case le:
c(OLE, D, cmp1, cmp2);
bc1t(target);
break;
case eq:
c(EQ, D, cmp1, cmp2);
bc1t(target);
break;
case ueq:
c(UEQ, D, cmp1, cmp2);
bc1t(target);
break;
case ne:
c(EQ, D, cmp1, cmp2);
bc1f(target);
break;
case nue:
c(UEQ, D, cmp1, cmp2);
bc1f(target);
break;
default:
CHECK(0);
}
}
if (bd == PROTECT) {
nop();
}
}
void MacroAssembler::Move(FPURegister dst, double imm) {
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
// Handle special values first.
bool force_load = dst.is(kDoubleRegZero);
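// When dst is kDoubleRegZero itself (e.g. while that register is being
// initialized), force a full load instead of copying from it.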
if (value.bits == zero.bits && !force_load) {
mov_d(dst, kDoubleRegZero);
} else if (value.bits == minus_zero.bits && !force_load) {
neg_d(dst, kDoubleRegZero);
} else {
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
// Move the low part of the double into the lower of the corresponding FPU
// register of FPU register pair.
if (lo != 0) {
li(at, Operand(lo));
mtc1(at, dst);
} else {
mtc1(zero_reg, dst);
}
// Move the high part of the double into the higher of the corresponding FPU
// register of FPU register pair.
if (hi != 0) {
li(at, Operand(hi));
mtc1(at, dst.high());
} else {
mtc1(zero_reg, dst.high());
}
}
}
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
if (kArchVariant == kLoongson) {
Label done;
Branch(&done, ne, rt, Operand(zero_reg));
mov(rd, rs);
bind(&done);
} else {
movz(rd, rs, rt);
}
}
void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
if (kArchVariant == kLoongson) {
Label done;
Branch(&done, eq, rt, Operand(zero_reg));
mov(rd, rs);
bind(&done);
} else {
movn(rd, rs, rt);
}
}
void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
if (kArchVariant == kLoongson) {
// Tests an FP condition code and then conditionally moves rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
ASSERT(cc == 0);
ASSERT(!(rs.is(t8) || rd.is(t8)));
Label done;
Register scratch = t8;
// We need to fetch the contents of the FCSR register and then test its
// cc (floating point condition code) bit (for cc = 0, this is bit 23 of
// the FCSR).
cfc1(scratch, FCSR);
// For the MIPS I, II and III architectures, the contents of scratch are
// UNPREDICTABLE for the instruction immediately following CFC1.
nop();
srl(scratch, scratch, 16);
andi(scratch, scratch, 0x0080);
Branch(&done, eq, scratch, Operand(zero_reg));
mov(rd, rs);
bind(&done);
} else {
movt(rd, rs, cc);
}
}
void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
if (kArchVariant == kLoongson) {
// Tests an FP condition code and then conditionally moves rs to rd.
// We do not currently use any FPU cc bit other than bit 0.
ASSERT(cc == 0);
ASSERT(!(rs.is(t8) || rd.is(t8)));
Label done;
Register scratch = t8;
// We need to fetch the contents of the FCSR register and then test its
// cc (floating point condition code) bit (for cc = 0, this is bit 23 of
// the FCSR).
cfc1(scratch, FCSR);
// For the MIPS I, II and III architectures, the contents of scratch are
// UNPREDICTABLE for the instruction immediately following CFC1.
nop();
srl(scratch, scratch, 16);
andi(scratch, scratch, 0x0080);
Branch(&done, ne, scratch, Operand(zero_reg));
mov(rd, rs);
bind(&done);
} else {
movf(rd, rs, cc);
}
}
void MacroAssembler::Clz(Register rd, Register rs) {
if (kArchVariant == kLoongson) {
ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
Register mask = t8;
Register scratch = t9;
Label loop, end;
mov(at, rs);
mov(rd, zero_reg);
lui(mask, 0x8000);
bind(&loop);
and_(scratch, at, mask);
Branch(&end, ne, scratch, Operand(zero_reg));
addiu(rd, rd, 1);
Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
srl(mask, mask, 1);
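// The srl above sits in the branch delay slot: the mask is halved after
// the branch condition is evaluated but before the next iteration.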
bind(&end);
} else {
clz(rd, rs);
}
}
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
Register result,
DoubleRegister double_input,
Register scratch,
DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
ASSERT(!result.is(scratch));
ASSERT(!double_input.is(double_scratch));
ASSERT(!except_flag.is(scratch));
Label done;
// Clear the except flag (0 = no exception)
mov(except_flag, zero_reg);
// Test for values that can be exactly represented as a signed 32-bit integer.
cvt_w_d(double_scratch, double_input);
mfc1(result, double_scratch);
cvt_d_w(double_scratch, double_scratch);
BranchF(&done, NULL, eq, double_input, double_scratch);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
// Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
cfc1(scratch, FCSR);
// Disable FPU exceptions.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
Round_w_d(double_scratch, double_input);
break;
case kRoundToZero:
Trunc_w_d(double_scratch, double_input);
break;
case kRoundToPlusInf:
Ceil_w_d(double_scratch, double_input);
break;
case kRoundToMinusInf:
Floor_w_d(double_scratch, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
ctc1(scratch, FCSR);
// Move the converted value into the result register.
mfc1(result, double_scratch);
// Check for fpu exceptions.
And(except_flag, except_flag, Operand(except_mask));
bind(&done);
}
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
DoubleRegister double_input,
Label* done) {
DoubleRegister single_scratch = kLithiumScratchDouble.low();
Register scratch = at;
Register scratch2 = t9;
// Clear cumulative exception flags and save the FCSR.
cfc1(scratch2, FCSR);
ctc1(zero_reg, FCSR);
// Try a conversion to a signed integer.
trunc_w_d(single_scratch, double_input);
mfc1(result, single_scratch);
// Retrieve and restore the FCSR.
cfc1(scratch, FCSR);
ctc1(scratch2, FCSR);
// Check for overflow and NaNs.
And(scratch,
scratch,
kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
// If we had no exceptions we are done.
Branch(done, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::TruncateDoubleToI(Register result,
DoubleRegister double_input) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
// If we fell through then inline version didn't succeed - call stub instead.
push(ra);
Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
sdc1(double_input, MemOperand(sp, 0));
DoubleToIStub stub(sp, result, 0, true, true);
CallStub(&stub);
Addu(sp, sp, Operand(kDoubleSize));
pop(ra);
bind(&done);
}
void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
Label done;
DoubleRegister double_scratch = f12;
ASSERT(!result.is(object));
ldc1(double_scratch,
MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
TryInlineTruncateDoubleToI(result, double_scratch, &done);
// If we fell through then inline version didn't succeed - call stub instead.
push(ra);
DoubleToIStub stub(object,
result,
HeapNumber::kValueOffset - kHeapObjectTag,
true,
true);
CallStub(&stub);
pop(ra);
bind(&done);
}
void MacroAssembler::TruncateNumberToI(Register object,
Register result,
Register heap_number_map,
Register scratch,
Label* not_number) {
Label done;
ASSERT(!result.is(object));
UntagAndJumpIfSmi(result, object, &done);
JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
TruncateHeapNumberToI(result, object);
bind(&done);
}
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
Ext(dst, src, kSmiTagSize, num_least_bits);
}
void MacroAssembler::GetLeastBitsFromInt32(Register dst,
Register src,
int num_least_bits) {
And(dst, src, Operand((1 << num_least_bits) - 1));
}
// Emulated conditional branches do not emit a nop in the branch delay slot.
//
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
(cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
BranchShort(offset, bdslot);
}
void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
BranchShort(offset, cond, rs, rt, bdslot);
}
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near(L)) {
BranchShort(L, bdslot);
} else {
Jr(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
Jr(L, bdslot);
} else {
BranchShort(L, bdslot);
}
}
}
void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near(L)) {
BranchShort(L, cond, rs, rt, bdslot);
} else {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jr(L, bdslot);
bind(&skip);
}
} else {
if (is_trampoline_emitted()) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jr(L, bdslot);
bind(&skip);
} else {
BranchShort(L, cond, rs, rt, bdslot);
}
}
}
void MacroAssembler::Branch(Label* L,
Condition cond,
Register rs,
Heap::RootListIndex index,
BranchDelaySlot bdslot) {
LoadRoot(at, index);
Branch(L, cond, rs, Operand(at), bdslot);
}
void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
b(offset);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
ASSERT(!rs.is(zero_reg));
Register r2 = no_reg;
Register scratch = at;
if (rt.is_reg()) {
// NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
// rt.
BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
switch (cond) {
case cc_always:
b(offset);
break;
case eq:
beq(rs, r2, offset);
break;
case ne:
bne(rs, r2, offset);
break;
// Signed comparison.
case greater:
if (r2.is(zero_reg)) {
bgtz(rs, offset);
} else {
slt(scratch, r2, rs);
bne(scratch, zero_reg, offset);
}
break;
case greater_equal:
if (r2.is(zero_reg)) {
bgez(rs, offset);
} else {
slt(scratch, rs, r2);
beq(scratch, zero_reg, offset);
}
break;
case less:
if (r2.is(zero_reg)) {
bltz(rs, offset);
} else {
slt(scratch, rs, r2);
bne(scratch, zero_reg, offset);
}
break;
case less_equal:
if (r2.is(zero_reg)) {
blez(rs, offset);
} else {
slt(scratch, r2, rs);
beq(scratch, zero_reg, offset);
}
break;
// Unsigned comparison.
case Ugreater:
if (r2.is(zero_reg)) {
bgtz(rs, offset);
} else {
sltu(scratch, r2, rs);
bne(scratch, zero_reg, offset);
}
break;
case Ugreater_equal:
if (r2.is(zero_reg)) {
bgez(rs, offset);
} else {
sltu(scratch, rs, r2);
beq(scratch, zero_reg, offset);
}
break;
case Uless:
if (r2.is(zero_reg)) {
// No code needs to be emitted.
return;
} else {
sltu(scratch, rs, r2);
bne(scratch, zero_reg, offset);
}
break;
case Uless_equal:
if (r2.is(zero_reg)) {
b(offset);
} else {
sltu(scratch, r2, rs);
beq(scratch, zero_reg, offset);
}
break;
default:
UNREACHABLE();
}
} else {
// Be careful to always use shifted_branch_offset only just before the
// branch instruction, as the location will be remembered for patching the
// target.
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
b(offset);
break;
case eq:
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
beq(rs, r2, offset);
break;
case ne:
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
bne(rs, r2, offset);
break;
// Signed comparison.
case greater:
if (rt.imm32_ == 0) {
bgtz(rs, offset);
} else {
r2 = scratch;
li(r2, rt);
slt(scratch, r2, rs);
bne(scratch, zero_reg, offset);
}
break;
case greater_equal:
if (rt.imm32_ == 0) {
bgez(rs, offset);
} else if (is_int16(rt.imm32_)) {
slti(scratch, rs, rt.imm32_);
beq(scratch, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
slt(scratch, rs, r2);
beq(scratch, zero_reg, offset);
}
break;
case less:
if (rt.imm32_ == 0) {
bltz(rs, offset);
} else if (is_int16(rt.imm32_)) {
slti(scratch, rs, rt.imm32_);
bne(scratch, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
slt(scratch, rs, r2);
bne(scratch, zero_reg, offset);
}
break;
case less_equal:
if (rt.imm32_ == 0) {
blez(rs, offset);
} else {
r2 = scratch;
li(r2, rt);
slt(scratch, r2, rs);
beq(scratch, zero_reg, offset);
}
break;
// Unsigned comparison.
case Ugreater:
if (rt.imm32_ == 0) {
bgtz(rs, offset);
} else {
r2 = scratch;
li(r2, rt);
sltu(scratch, r2, rs);
bne(scratch, zero_reg, offset);
}
break;
case Ugreater_equal:
if (rt.imm32_ == 0) {
bgez(rs, offset);
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
beq(scratch, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
sltu(scratch, rs, r2);
beq(scratch, zero_reg, offset);
}
break;
case Uless:
if (rt.imm32_ == 0) {
// No code needs to be emitted.
return;
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
bne(scratch, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
sltu(scratch, rs, r2);
bne(scratch, zero_reg, offset);
}
break;
case Uless_equal:
if (rt.imm32_ == 0) {
b(offset);
} else {
r2 = scratch;
li(r2, rt);
sltu(scratch, r2, rs);
beq(scratch, zero_reg, offset);
}
break;
default:
UNREACHABLE();
}
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
// We compute shifted_branch_offset in the argument position of the branch
// instruction so it is evaluated just before the branch is generated.
b(shifted_branch_offset(L, false));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
int32_t offset = 0;
Register r2 = no_reg;
Register scratch = at;
if (rt.is_reg()) {
BlockTrampolinePoolScope block_trampoline_pool(this);
r2 = rt.rm_;
// Be careful to always use shifted_branch_offset only just before the
// branch instruction, as the location will be remembered for patching the
// target.
switch (cond) {
case cc_always:
offset = shifted_branch_offset(L, false);
b(offset);
break;
case eq:
offset = shifted_branch_offset(L, false);
beq(rs, r2, offset);
break;
case ne:
offset = shifted_branch_offset(L, false);
bne(rs, r2, offset);
break;
// Signed comparison.
case greater:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
bgtz(rs, offset);
} else {
slt(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
}
break;
case greater_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
bgez(rs, offset);
} else {
slt(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
break;
case less:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
bltz(rs, offset);
} else {
slt(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
}
break;
case less_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
blez(rs, offset);
} else {
slt(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
break;
// Unsigned comparison.
case Ugreater:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
bgtz(rs, offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
}
break;
case Ugreater_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
bgez(rs, offset);
} else {
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
break;
case Uless:
if (r2.is(zero_reg)) {
// No code needs to be emitted.
return;
} else {
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
}
break;
case Uless_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
b(offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
break;
default:
UNREACHABLE();
}
} else {
// Be careful to always use shifted_branch_offset only just before the
// branch instruction, as the location will be remembered for patching the
// target.
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
offset = shifted_branch_offset(L, false);
b(offset);
break;
case eq:
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
offset = shifted_branch_offset(L, false);
beq(rs, r2, offset);
break;
case ne:
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
offset = shifted_branch_offset(L, false);
bne(rs, r2, offset);
break;
// Signed comparison.
case greater:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
bgtz(rs, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
slt(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
}
break;
case greater_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
bgez(rs, offset);
} else if (is_int16(rt.imm32_)) {
slti(scratch, rs, rt.imm32_);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
slt(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
break;
case less:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
bltz(rs, offset);
} else if (is_int16(rt.imm32_)) {
slti(scratch, rs, rt.imm32_);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
slt(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
}
break;
case less_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
blez(rs, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
slt(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
break;
// Unsigned comparison.
case Ugreater:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
bgtz(rs, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
}
break;
case Ugreater_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
bgez(rs, offset);
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
break;
case Uless:
if (rt.imm32_ == 0) {
// No code needs to be emitted.
return;
} else if (is_int16(rt.imm32_)) {
sltiu(scratch, rs, rt.imm32_);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
bne(scratch, zero_reg, offset);
}
break;
case Uless_equal:
if (rt.imm32_ == 0) {
offset = shifted_branch_offset(L, false);
b(offset);
} else {
ASSERT(!scratch.is(rs));
r2 = scratch;
li(r2, rt);
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
beq(scratch, zero_reg, offset);
}
break;
default:
UNREACHABLE();
}
}
// Check that offset actually fits in an int16_t.
ASSERT(is_int16(offset));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, bdslot);
}
void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
BranchAndLinkShort(offset, cond, rs, rt, bdslot);
}
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near(L)) {
BranchAndLinkShort(L, bdslot);
} else {
Jalr(L, bdslot);
}
} else {
if (is_trampoline_emitted()) {
Jalr(L, bdslot);
} else {
BranchAndLinkShort(L, bdslot);
}
}
}
void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
if (L->is_bound()) {
if (is_near(L)) {
BranchAndLinkShort(L, cond, rs, rt, bdslot);
} else {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jalr(L, bdslot);
bind(&skip);
}
} else {
if (is_trampoline_emitted()) {
Label skip;
Condition neg_cond = NegateCondition(cond);
BranchShort(&skip, neg_cond, rs, rt);
Jalr(L, bdslot);
bind(&skip);
} else {
BranchAndLinkShort(L, cond, rs, rt, bdslot);
}
}
}
// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
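// slt leaves 0 or 1 in the scratch register, and addiu -1 maps that to
// -1 or 0, so bgezal links exactly when slt produced 1 and bltzal links
// exactly when slt produced 0.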
void MacroAssembler::BranchAndLinkShort(int16_t offset,
BranchDelaySlot bdslot) {
bal(offset);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
Register rs, const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
Register r2 = no_reg;
Register scratch = at;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
r2 = scratch;
li(r2, rt);
}
{
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
bal(offset);
break;
case eq:
bne(rs, r2, 2);
nop();
bal(offset);
break;
case ne:
beq(rs, r2, 2);
nop();
bal(offset);
break;
// Signed comparison.
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
bgezal(scratch, offset);
break;
case greater_equal:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
bltzal(scratch, offset);
break;
case less:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
bgezal(scratch, offset);
break;
case less_equal:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
bltzal(scratch, offset);
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
bgezal(scratch, offset);
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
bltzal(scratch, offset);
break;
case Uless:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
bgezal(scratch, offset);
break;
case Uless_equal:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
bltzal(scratch, offset);
break;
default:
UNREACHABLE();
}
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
bal(shifted_branch_offset(L, false));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
const Operand& rt,
BranchDelaySlot bdslot) {
BRANCH_ARGS_CHECK(cond, rs, rt);
int32_t offset = 0;
Register r2 = no_reg;
Register scratch = at;
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
r2 = scratch;
li(r2, rt);
}
{
BlockTrampolinePoolScope block_trampoline_pool(this);
switch (cond) {
case cc_always:
offset = shifted_branch_offset(L, false);
bal(offset);
break;
case eq:
bne(rs, r2, 2);
nop();
offset = shifted_branch_offset(L, false);
bal(offset);
break;
case ne:
beq(rs, r2, 2);
nop();
offset = shifted_branch_offset(L, false);
bal(offset);
break;
// Signed comparison.
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
offset = shifted_branch_offset(L, false);
bgezal(scratch, offset);
break;
case greater_equal:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
offset = shifted_branch_offset(L, false);
bltzal(scratch, offset);
break;
case less:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
offset = shifted_branch_offset(L, false);
bgezal(scratch, offset);
break;
case less_equal:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
offset = shifted_branch_offset(L, false);
bltzal(scratch, offset);
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
offset = shifted_branch_offset(L, false);
bgezal(scratch, offset);
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
offset = shifted_branch_offset(L, false);
bltzal(scratch, offset);
break;
case Uless:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
offset = shifted_branch_offset(L, false);
bgezal(scratch, offset);
break;
case Uless_equal:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
offset = shifted_branch_offset(L, false);
bltzal(scratch, offset);
break;
default:
UNREACHABLE();
}
}
// Check that offset actually fits in an int16_t.
ASSERT(is_int16(offset));
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::Jump(Register target,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond == cc_always) {
jr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jr(target);
}
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT)
nop();
}
void MacroAssembler::Jump(intptr_t target,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
Label skip;
if (cond != cc_always) {
Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
}
// The first instruction of 'li' may be placed in the delay slot.
// This is not an issue; t9 is expected to be clobbered anyway.
li(t9, Operand(target, rmode));
Jump(t9, al, zero_reg, Operand(zero_reg), bd);
bind(&skip);
}
void MacroAssembler::Jump(Address target,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
}
void MacroAssembler::Jump(Handle<Code> code,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
int MacroAssembler::CallSize(Register target,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
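// A direct jalr is a single instruction; a conditional call also needs a
// branch over the jalr and that branch's delay-slot nop.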
int size = 0;
if (cond == cc_always) {
size += 1;
} else {
size += 3;
}
if (bd == PROTECT)
size += 1;
return size * kInstrSize;
}
// Note: To call gcc-compiled C code on mips, you must call through t9.
void MacroAssembler::Call(Register target,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
if (cond == cc_always) {
jalr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
jalr(target);
}
// Emit a nop in the branch delay slot if required.
if (bd == PROTECT)
nop();
ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(Address target,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
int size = CallSize(t9, cond, rs, rt, bd);
return size + 2 * kInstrSize;
}
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
int32_t target_int = reinterpret_cast<int32_t>(target);
// Must record previous source positions before the
// li() generates a new code target.
positions_recorder()->WriteRecordedPositions();
li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, cond, rs, rt, bd);
ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
AllowDeferredHandleDereference using_raw_address;
return CallSize(reinterpret_cast<Address>(code.location()),
rmode, cond, rs, rt, bd);
}
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id,
Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
SetRecordedAstId(ast_id);
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
AllowDeferredHandleDereference embedding_raw_address;
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
void MacroAssembler::Ret(Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
Jump(ra, cond, rs, rt, bd);
}
void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm28;
imm28 = jump_address(L);
imm28 &= kImm28Mask;
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
j(imm28);
}
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
}
jr(at);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
uint32_t imm32;
imm32 = jump_address(L);
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal references
// until associated instructions are emitted and available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
}
jalr(at);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT)
nop();
}
void MacroAssembler::DropAndRet(int drop) {
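// The stack adjustment below executes in the jr's delay slot, so the
// drop happens before the return completes.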
Ret(USE_DELAY_SLOT);
addiu(sp, sp, drop * kPointerSize);
}
void MacroAssembler::DropAndRet(int drop,
Condition cond,
Register r1,
const Operand& r2) {
// Both Drop and Ret need to be conditional.
Label skip;
if (cond != cc_always) {
Branch(&skip, NegateCondition(cond), r1, r2);
}
Drop(drop);
Ret();
if (cond != cc_always) {
bind(&skip);
}
}
void MacroAssembler::Drop(int count,
Condition cond,
Register reg,
const Operand& op) {
if (count <= 0) {
return;
}
Label skip;
if (cond != al) {
Branch(&skip, NegateCondition(cond), reg, op);
}
addiu(sp, sp, count * kPointerSize);
if (cond != al) {
bind(&skip);
}
}
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch) {
if (scratch.is(no_reg)) {
Xor(reg1, reg1, Operand(reg2));
Xor(reg2, reg2, Operand(reg1));
Xor(reg1, reg1, Operand(reg2));
} else