// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compiler.h"
#include "src/debug.h"
#include "src/full-codegen.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
#include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
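// A patch site is a location in generated code which can be patched later.
// EmitJumpIfSmi/EmitJumpIfNotSmi emit a single patchable branch, and
// EmitPatchInfo records the register and label that PatchInlinedSmiCode
// (in ic-arm64.cc) needs in order to rewrite that branch into a real smi
// check.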
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm), reg_(NoReg) {
#ifdef DEBUG
info_emitted_ = false;
#endif
}
~JumpPatchSite() {
if (patch_site_.is_bound()) {
DCHECK(info_emitted_);
} else {
DCHECK(reg_.IsNone());
}
}
void EmitJumpIfNotSmi(Register reg, Label* target) {
// This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
DCHECK(!info_emitted_);
DCHECK(reg.Is64Bits());
DCHECK(!reg.Is(csp));
reg_ = reg;
__ bind(&patch_site_);
__ tbz(xzr, 0, target); // Always taken before patched.
}
void EmitJumpIfSmi(Register reg, Label* target) {
// This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
InstructionAccurateScope scope(masm_, 1);
DCHECK(!info_emitted_);
DCHECK(reg.Is64Bits());
DCHECK(!reg.Is(csp));
reg_ = reg;
__ bind(&patch_site_);
__ tbnz(xzr, 0, target); // Never taken before patched.
}
void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) {
UseScratchRegisterScope temps(masm_);
Register temp = temps.AcquireX();
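// kSmiTag is 0, so the OR of both values has its tag bit set iff at least
// one of them is not a smi; a single smi check on the OR then suffices.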
__ Orr(temp, reg1, reg2);
EmitJumpIfNotSmi(temp, target);
}
void EmitPatchInfo() {
Assembler::BlockPoolsScope scope(masm_);
InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
#ifdef DEBUG
info_emitted_ = true;
#endif
}
private:
MacroAssembler* masm_;
Label patch_site_;
Register reg_;
#ifdef DEBUG
bool info_emitted_;
#endif
};
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
// function.
//
// The live registers are:
// - x1: the JS function object being called (i.e. ourselves).
// - cp: our context.
// - fp: our caller's frame pointer.
// - jssp: stack pointer.
// - lr: return address.
//
// The function builds a JS frame. See JavaScriptFrameConstants in
// frames-arm64.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ Function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ Debug("stop-at", __LINE__, BREAK);
}
#endif
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
__ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
__ Ldr(x10, GlobalObjectMemOperand());
__ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
__ Poke(x10, receiver_offset);
__ Bind(&ok);
}
// Open a frame scope to indicate that there is a frame on the stack.
// The MANUAL indicates that the scope shouldn't actually generate code
// to set up the frame because we do it manually below.
FrameScope frame_scope(masm_, StackFrame::MANUAL);
// This call emits the following sequence in a way that can be patched for
// code ageing support:
// Push(lr, fp, cp, x1);
// Add(fp, jssp, 2 * kPointerSize);
info->set_prologue_offset(masm_->pc_offset());
__ Prologue(info->IsCodePreAgingActive());
info->AddNoFrameRange(0, masm_->pc_offset());
// Reserve space on the stack for locals.
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
DCHECK(jssp.Is(__ StackPointer()));
__ Sub(x10, jssp, locals_count * kPointerSize);
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ Bind(&ok);
}
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
if (FLAG_optimize_for_size) {
__ PushMultipleTimes(x10, locals_count);
} else {
const int kMaxPushes = 32;
if (locals_count >= kMaxPushes) {
int loop_iterations = locals_count / kMaxPushes;
__ Mov(x3, loop_iterations);
Label loop_header;
__ Bind(&loop_header);
// Do pushes.
__ PushMultipleTimes(x10, kMaxPushes);
__ Subs(x3, x3, 1);
__ B(ne, &loop_header);
}
int remaining = locals_count % kMaxPushes;
// Emit the remaining pushes.
__ PushMultipleTimes(x10, remaining);
}
}
}
bool function_in_register_x1 = true;
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
// Argument to NewContext is the function, which is still in x1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
__ Mov(x10, Operand(info->scope()->GetScopeInfo()));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
} else {
__ Push(x1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register_x1 = false;
// Context is returned in x0. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
__ Mov(cp, x0);
__ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Variable* var = scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
__ Ldr(x10, MemOperand(fp, parameter_offset));
// Store it in the context.
MemOperand target = ContextMemOperand(cp, var->index());
__ Str(x10, target);
// Update the write barrier.
if (need_write_barrier) {
__ RecordWriteContextSlot(
cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(cp, &done);
__ Abort(kExpectedNewSpaceObject);
__ bind(&done);
}
}
}
}
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
if (!function_in_register_x1) {
// Load this again, if it's used by the local context below.
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
} else {
__ Mov(x3, x1);
}
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset);
__ Mov(x1, Smi::FromInt(num_parameters));
__ Push(x3, x2, x1);
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, x0, x1, x2);
}
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
// Visit the declarations and body unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ Declarations");
scope()->VisitIllegalRedeclaration(this);
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{ Comment cmnt(masm_, "[ Declarations");
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
DCHECK(function->proxy()->var()->mode() == CONST ||
function->proxy()->var()->mode() == CONST_LEGACY);
DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
VisitDeclarations(scope()->declarations());
}
{
Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
DCHECK(jssp.Is(__ StackPointer()));
__ CompareRoot(jssp, Heap::kStackLimitRootIndex);
__ B(hs, &ok);
PredictableCodeSizeScope predictable(masm_,
Assembler::kCallSizeWithRelocation);
__ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ Bind(&ok);
}
{
Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
VisitStatements(function()->body());
DCHECK(loop_depth() == 0);
}
}
// Always emit a 'return undefined' in case control fell off the end of
// the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
// Force emission of the pools, so they don't get emitted in the middle
// of the back edge table.
masm()->CheckVeneerPool(true, false);
masm()->CheckConstPool(true, false);
}
void FullCodeGenerator::ClearAccumulator() {
__ Mov(x0, Smi::FromInt(0));
}
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ Mov(x2, Operand(profiling_counter_));
__ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset));
__ Subs(x3, x3, Smi::FromInt(delta));
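// Subs leaves the flags set for the caller, which branches on pl while the
// counter is still non-negative and only then emits the interrupt check.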
__ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
}
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
if (info_->is_debug()) {
// Detect debug break requests as soon as possible.
reset_value = FLAG_interrupt_budget >> 4;
}
__ Mov(x2, Operand(profiling_counter_));
__ Mov(x3, Smi::FromInt(reset_value));
__ Str(x3, FieldMemOperand(x2, Cell::kValueOffset));
}
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
DCHECK(jssp.Is(__ StackPointer()));
Comment cmnt(masm_, "[ Back edge bookkeeping");
// Block literal pools whilst emitting back edge code.
Assembler::BlockPoolsScope block_const_pool(masm_);
Label ok;
DCHECK(back_edge_target->is_bound());
// We want to do a round rather than a floor of distance/kCodeSizeMultiplier
// to reduce the absolute error due to the integer division. To do that,
// we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to
// the result).
int distance =
masm_->SizeOfCodeGeneratedSince(back_edge_target) + kCodeSizeMultiplier / 2;
int weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
__ B(pl, &ok);
__ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
RecordBackEdge(stmt->OsrEntryId());
EmitProfilingCounterReset();
__ Bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
// entry becomes the target of a bailout. We don't expect it to be, but
// we want it to work if it is.
PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
}
void FullCodeGenerator::EmitReturnSequence() {
Comment cmnt(masm_, "[ Return sequence");
if (return_label_.is_bound()) {
__ B(&return_label_);
} else {
__ Bind(&return_label_);
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in x0.
__ Push(result_register());
__ CallRuntime(Runtime::kTraceExit, 1);
DCHECK(x0.Is(result_register()));
}
// Pretend that the exit is a backwards jump to the entry.
int weight = 1;
if (info_->ShouldSelfOptimize()) {
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else {
int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
weight = Min(kMaxBackEdgeWeight,
Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
__ B(pl, &ok);
__ Push(x0);
__ Call(isolate()->builtins()->InterruptCheck(),
RelocInfo::CODE_TARGET);
__ Pop(x0);
EmitProfilingCounterReset();
__ Bind(&ok);
// Make sure that the constant pool is not emitted inside of the return
// sequence. This sequence can get patched when the debugger is used. See
// debug-arm64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
{
InstructionAccurateScope scope(masm_,
Assembler::kJSRetSequenceInstructions);
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
// This code is generated using Assembler methods rather than Macro
// Assembler methods because it will be patched later on, and so the size
// of the generated code must be consistent.
const Register& current_sp = __ StackPointer();
// Nothing ensures 16-byte alignment here.
DCHECK(!current_sp.Is(csp));
__ mov(current_sp, fp);
int no_frame_start = masm_->pc_offset();
__ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
// Drop the arguments and receiver and return.
// TODO(all): This implementation is overkill as it supports 2**31+1
// arguments, consider how to improve it without creating a security
// hole.
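// The 64-bit literal emitted by dc64 below sits three instructions past the
// pc-relative load (after the add and ret); it holds the byte size of the
// arguments plus receiver, so adding it to the stack pointer drops them.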
__ ldr_pcrel(ip0, (3 * kInstructionSize) >> kLoadLiteralScaleLog2);
__ add(current_sp, current_sp, ip0);
__ ret();
__ dc64(kXRegSize * (info_->scope()->num_parameters() + 1));
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
}
void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
}
void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
__ Push(result_register());
}
void FullCodeGenerator::TestContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// For simplicity we always test the accumulator register.
codegen()->GetVar(result_register(), var);
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
// Root values have no side effects.
}
void FullCodeGenerator::AccumulatorValueContext::Plug(
Heap::RootListIndex index) const {
__ LoadRoot(result_register(), index);
}
void FullCodeGenerator::StackValueContext::Plug(
Heap::RootListIndex index) const {
__ LoadRoot(result_register(), index);
__ Push(result_register());
}
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
index == Heap::kFalseValueRootIndex) {
if (false_label_ != fall_through_) __ B(false_label_);
} else if (index == Heap::kTrueValueRootIndex) {
if (true_label_ != fall_through_) __ B(true_label_);
} else {
__ LoadRoot(result_register(), index);
codegen()->DoTest(this);
}
}
void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
}
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
__ Mov(result_register(), Operand(lit));
}
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
__ Mov(result_register(), Operand(lit));
__ Push(result_register());
}
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ B(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ B(true_label_);
} else if (lit->IsString()) {
if (String::cast(*lit)->length() == 0) {
if (false_label_ != fall_through_) __ B(false_label_);
} else {
if (true_label_ != fall_through_) __ B(true_label_);
}
} else if (lit->IsSmi()) {
if (Smi::cast(*lit)->value() == 0) {
if (false_label_ != fall_through_) __ B(false_label_);
} else {
if (true_label_ != fall_through_) __ B(true_label_);
}
} else {
// For simplicity we always test the accumulator register.
__ Mov(result_register(), Operand(lit));
codegen()->DoTest(this);
}
}
void FullCodeGenerator::EffectContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
__ Drop(count);
}
void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
int count,
Register reg) const {
DCHECK(count > 0);
__ Drop(count);
__ Move(result_register(), reg);
}
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
if (count > 1) __ Drop(count - 1);
__ Poke(reg, 0);
}
void FullCodeGenerator::TestContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
// For simplicity we always test the accumulator register.
__ Drop(count);
__ Mov(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
codegen()->DoTest(this);
}
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
__ Bind(materialize_true);
}
void FullCodeGenerator::AccumulatorValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
Label done;
__ Bind(materialize_true);
__ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
__ B(&done);
__ Bind(materialize_false);
__ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
__ Bind(&done);
}
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true,
Label* materialize_false) const {
Label done;
__ Bind(materialize_true);
__ LoadRoot(x10, Heap::kTrueValueRootIndex);
__ B(&done);
__ Bind(materialize_false);
__ LoadRoot(x10, Heap::kFalseValueRootIndex);
__ Bind(&done);
__ Push(x10);
}
void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == true_label_);
DCHECK(materialize_false == false_label_);
}
void FullCodeGenerator::EffectContext::Plug(bool flag) const {
}
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
__ LoadRoot(result_register(), value_root_index);
}
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
__ LoadRoot(x10, value_root_index);
__ Push(x10);
}
void FullCodeGenerator::TestContext::Plug(bool flag) const {
codegen()->PrepareForBailoutBeforeSplit(condition(),
true,
true_label_,
false_label_);
if (flag) {
if (true_label_ != fall_through_) {
__ B(true_label_);
}
} else {
if (false_label_ != fall_through_) {
__ B(false_label_);
}
}
}
void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
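// The ToBoolean stub leaves zero in x0 iff the condition is false, so a
// single comparison against zero splits the control flow.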
__ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through);
}
// If (cond), branch to if_true.
// If (!cond), branch to if_false.
// fall_through is used as an optimization in cases where only one branch
// instruction is necessary.
void FullCodeGenerator::Split(Condition cond,
Label* if_true,
Label* if_false,
Label* fall_through) {
if (if_false == fall_through) {
__ B(cond, if_true);
} else if (if_true == fall_through) {
DCHECK(if_false != fall_through);
__ B(NegateCondition(cond), if_false);
} else {
__ B(cond, if_true);
__ B(if_false);
}
}
MemOperand FullCodeGenerator::StackOperand(Variable* var) {
// Offset is negative because higher indexes are at lower addresses.
int offset = -var->index() * kXRegSize;
// Adjust by a (parameter or local) base offset.
if (var->IsParameter()) {
offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
} else {
offset += JavaScriptFrameConstants::kLocal0Offset;
}
return MemOperand(fp, offset);
}
MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
DCHECK(var->IsContextSlot() || var->IsStackAllocated());
if (var->IsContextSlot()) {
int context_chain_length = scope()->ContextChainLength(var->scope());
__ LoadContext(scratch, context_chain_length);
return ContextMemOperand(scratch, var->index());
} else {
return StackOperand(var);
}
}
void FullCodeGenerator::GetVar(Register dest, Variable* var) {
// Use destination as scratch.
MemOperand location = VarOperand(var, dest);
__ Ldr(dest, location);
}
void FullCodeGenerator::SetVar(Variable* var,
Register src,
Register scratch0,
Register scratch1) {
DCHECK(var->IsContextSlot() || var->IsStackAllocated());
DCHECK(!AreAliased(src, scratch0, scratch1));
MemOperand location = VarOperand(var, scratch0);
__ Str(src, location);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
// scratch0 contains the correct context.
__ RecordWriteContextSlot(scratch0,
location.offset(),
src,
scratch1,
kLRHasBeenSaved,
kDontSaveFPRegs);
}
}
void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
Label* if_false) {
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
if (!context()->IsTest() || !info_->IsOptimizable()) return;
// TODO(all): Investigate to see if there is something to work on here.
Label skip;
if (should_normalize) {
__ B(&skip);
}
PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, NULL);
__ Bind(&skip);
}
}
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
__ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ CompareRoot(x1, Heap::kWithContextMapRootIndex);
__ Check(ne, kDeclarationInWithContext);
__ CompareRoot(x1, Heap::kCatchContextMapRootIndex);
__ Check(ne, kDeclarationInCatchContext);
}
}
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
// If it was not possible to allocate the variable at compile time, we
// need to "declare" it at runtime to make sure it actually exists in the
// local context.
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(),
zone());
break;
case Variable::PARAMETER:
case Variable::LOCAL:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
__ Str(x10, StackOperand(variable));
}
break;
case Variable::CONTEXT:
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
__ Str(x10, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
break;
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
__ Mov(x2, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
: NONE;
__ Mov(x1, Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (hole_init) {
__ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
__ Push(cp, x2, x1, x0);
} else {
// Pushing 0 (xzr) indicates no initial value.
__ Push(cp, x2, x1, xzr);
}
__ CallRuntime(Runtime::kDeclareLookupSlot, 4);
break;
}
}
}
void FullCodeGenerator::VisitFunctionDeclaration(
FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED: {
globals_->Add(variable->name(), zone());
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
// Check for stack overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
break;
}
case Variable::PARAMETER:
case Variable::LOCAL: {
Comment cmnt(masm_, "[ Function Declaration");
VisitForAccumulatorValue(declaration->fun());
__ Str(result_register(), StackOperand(variable));
break;
}
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ Function Declaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
__ Str(result_register(), ContextMemOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
offset,
result_register(),
x2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
break;
}
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ Function Declaration");
__ Mov(x2, Operand(variable->name()));
__ Mov(x1, Smi::FromInt(NONE));
__ Push(cp, x2, x1);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
__ CallRuntime(Runtime::kDeclareLookupSlot, 4);
break;
}
}
}
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
Variable* variable = declaration->proxy()->var();
DCHECK(variable->location() == Variable::CONTEXT);
DCHECK(variable->interface()->IsFrozen());
Comment cmnt(masm_, "[ ModuleDeclaration");
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
__ LoadContext(x1, scope_->ContextChainLength(scope_->ScriptScope()));
__ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
__ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
// Assign it.
__ Str(x1, ContextMemOperand(cp, variable->index()));
// We know that we have written a module, which is not a smi.
__ RecordWriteContextSlot(cp,
Context::SlotOffset(variable->index()),
x1,
x3,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
// Traverse into body.
Visit(declaration->module());
}
void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
case Variable::UNALLOCATED:
// TODO(rossberg)
break;
case Variable::CONTEXT: {
Comment cmnt(masm_, "[ ImportDeclaration");
EmitDebugCheckDeclarationContext(variable);
// TODO(rossberg)
break;
}
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::LOOKUP:
UNREACHABLE();
}
}
void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
// TODO(rossberg)
}
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ Mov(x11, Operand(pairs));
Register flags = xzr;
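// When the flags smi is zero, push xzr directly instead of materializing
// a zero in a scratch register.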
if (Smi::FromInt(DeclareGlobalsFlags())) {
flags = x10;
__ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
}
__ Push(cp, x11, flags);
__ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
__ CallRuntime(Runtime::kDeclareModules, 1);
// Return value is ignored.
}
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement");
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
SetStatementPosition(stmt);
// Keep the switch value on the stack until a case matches.
VisitForStackValue(stmt->tag());
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
ZoneList<CaseClause*>* clauses = stmt->cases();
CaseClause* default_clause = NULL; // Can occur anywhere in the list.
Label next_test; // Recycled for each test.
// Compile all the tests with branches to their bodies.
for (int i = 0; i < clauses->length(); i++) {
CaseClause* clause = clauses->at(i);
clause->body_target()->Unuse();
// The default is not a test, but remember it as the final fall-through.
if (clause->is_default()) {
default_clause = clause;
continue;
}
Comment cmnt(masm_, "[ Case comparison");
__ Bind(&next_test);
next_test.Unuse();
// Compile the label expression.
VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
__ Peek(x1, 0); // Switch value.
JumpPatchSite patch_site(masm_);
if (ShouldInlineSmiCase(Token::EQ_STRICT)) {
Label slow_case;
patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case);
__ Cmp(x1, x0);
__ B(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ B(clause->body_target());
__ Bind(&slow_case);
}
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
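// The block between the branch below and Bind(&skip) is not reached in
// normal execution; it is the re-entry path after a bailout from the
// CompareIC, where the comparison result is a boolean in x0.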
Label skip;
__ B(&skip);
PrepareForBailout(clause, TOS_REG);
__ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test);
__ Drop(1);
__ B(clause->body_target());
__ Bind(&skip);
__ Cbnz(x0, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ B(clause->body_target());
}
// Discard the test value and jump to the default if present, otherwise to
// the end of the statement.
__ Bind(&next_test);
__ Drop(1); // Switch value is no longer needed.
if (default_clause == NULL) {
__ B(nested_statement.break_label());
} else {
__ B(default_clause->body_target());
}
// Compile all the case bodies.
for (int i = 0; i < clauses->length(); i++) {
Comment cmnt(masm_, "[ Case body");
CaseClause* clause = clauses->at(i);
__ Bind(clause->body_target());
PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
VisitStatements(clause->statements());
}
__ Bind(nested_statement.break_label());
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
}
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
ASM_LOCATION("FullCodeGenerator::VisitForInStatement");
Comment cmnt(masm_, "[ ForInStatement");
FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
// TODO(all): This visitor probably needs better comments and a revisit.
SetStatementPosition(stmt);
Label loop, exit;
ForIn loop_statement(this, stmt);
increment_loop_depth();
// Get the object to enumerate over. If the object is null or undefined, skip
// over the loop. See ECMA-262 version 5, section 12.6.4.
SetExpressionPosition(stmt->enumerable());
VisitForAccumulatorValue(stmt->enumerable());
__ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
Register null_value = x15;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ Cmp(x0, null_value);
__ B(eq, &exit);
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(x0, &convert);
__ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge);
__ Bind(&convert);
__ Push(x0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ Bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
__ Push(x0);
// Check for proxies.
Label call_runtime;
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
__ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime);
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Label use_cache;
__ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
__ B(&use_cache);
// Get the set of properties to enumerate.
__ Bind(&call_runtime);
__ Push(x0); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
// If we got a map from the runtime call, we can do a fast
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array, no_descriptors;
__ Ldr(x2, FieldMemOperand(x0, HeapObject::kMapOffset));
__ JumpIfNotRoot(x2, Heap::kMetaMapRootIndex, &fixed_array);
// We got a map in register x0. Get the enumeration cache from it.
__ Bind(&use_cache);
__ EnumLengthUntagged(x1, x0);
__ Cbz(x1, &no_descriptors);
__ LoadInstanceDescriptors(x0, x2);
__ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
__ Ldr(x2,
FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ SmiTag(x1);
// Map, enumeration cache, enum cache length, zero (the last two as smis).
__ Push(x0, x2, x1, xzr);
__ B(&loop);
__ Bind(&no_descriptors);
__ Drop(1);
__ B(&exit);
// We got a fixed array in register x0. Iterate through that.
__ Bind(&fixed_array);
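// Mark the for-in feedback slot megamorphic, so optimized code makes no
// assumptions about the receiver's map on this slow path.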
__ LoadObject(x1, FeedbackVector());
__ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
int vector_index = FeedbackVector()->GetIndex(slot);
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
__ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
__ Peek(x10, 0); // Get enumerated object.
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
// TODO(all): similar check was done already. Can we avoid it here?
__ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
DCHECK(Smi::FromInt(0) == 0);
__ CzeroX(x1, le); // Zero indicates proxy.
__ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
// Smi and array, fixed array length (as smi) and initial index.
__ Push(x1, x0, x2, xzr);
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
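// Stack layout inside the loop, from the top: current index (smi), array
// length (smi), fixed array of keys, expected map (or smi 0 in the proxy
// case), and the enumerable object.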
__ Bind(&loop);
SetExpressionPosition(stmt->each());
// Load the current count to x0, load the length to x1.
__ PeekPair(x0, x1, 0);
__ Cmp(x0, x1); // Compare to the array length.
__ B(hs, loop_statement.break_label());
// Get the current entry of the array into register x3.
__ Peek(x10, 2 * kXRegSize);
__ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2));
__ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag));
// Get the expected map from the stack or a smi in the
// permanent slow case into register x2.
__ Peek(x2, 3 * kXRegSize);
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
__ Peek(x1, 4 * kXRegSize);
__ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset));
__ Cmp(x11, x2);
__ B(eq, &update_each);
// For proxies, no filtering is done.
// TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
STATIC_ASSERT(kSmiTag == 0);
__ Cbz(x2, &update_each);
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
__ Push(x1, x3);
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ Mov(x3, x0);
__ Cbz(x0, loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
// entry in register x3.
__ Bind(&update_each);
__ Mov(result_register(), x3);
// Perform the assignment as if via '='.
{ EffectContext context(this);
EmitAssignment(stmt->each());
}
// Generate code for the body of the loop.
Visit(stmt->body());
// Generate code for going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ Bind(loop_statement.continue_label());
// TODO(all): We could use a callee saved register to avoid popping.
__ Pop(x0);
__ Add(x0, x0, Smi::FromInt(1));
__ Push(x0);
EmitBackEdgeBookkeeping(stmt, &loop);
__ B(&loop);
// Remove the pointers stored on the stack.
__ Bind(loop_statement.break_label());
__ Drop(5);
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
__ Bind(&exit);
decrement_loop_depth();
}
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new space for
// nested functions that don't need literals cloning. If we're running with
// the --always-opt or the --prepare-always-opt flag, we need to use the
// runtime function so that the new function we are creating here gets a
// chance to have its code optimized and doesn't just get a copy of the
// existing unoptimized code.
if (!FLAG_always_opt &&
!FLAG_prepare_always_opt &&
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
__ Mov(x2, Operand(info));
__ CallStub(&stub);
} else {
__ Mov(x11, Operand(info));
__ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
: Heap::kFalseValueRootIndex);
__ Push(cp, x11, x10);
__ CallRuntime(Runtime::kNewClosure, 3);
}
context()->Plug(x0);
}
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
EmitVariableLoad(expr);
}
void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
Comment cmnt(masm_, "[ SuperReference ");
__ ldr(LoadDescriptor::ReceiverRegister(),
MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
__ Mov(LoadDescriptor::NameRegister(), Operand(home_object_symbol));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(expr->HomeObjectFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
}
__ Mov(x10, Operand(isolate()->factory()->undefined_value()));
__ cmp(x0, x10);
Label done;
__ b(&done, ne);
__ CallRuntime(Runtime::kThrowNonMethodError, 0);
__ bind(&done);
}
void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
int offset) {
if (NeedsHomeObject(initializer)) {
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
CallStoreIC();
}
}
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
Register current = cp;
Register next = x10;
Register temp = x11;
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
__ Cbnz(temp, slow);
}
// Load next context in chain.
__ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
// If no outer scope calls eval, we do not need to check more
// context extensions.
if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
if (s->is_eval_scope()) {
Label loop, fast;
__ Mov(next, current);
__ Bind(&loop);
// Terminate at native context.
__ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
__ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast);
// Check that extension is NULL.
__ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
__ Cbnz(temp, slow);
// Load next context in chain.
__ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX));
__ B(&loop);
__ Bind(&fast);
}
__ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
__ Mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(proxy->VariableFeedbackSlot()));
}
ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
: CONTEXTUAL;
CallLoadIC(mode);
}
MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Label* slow) {
DCHECK(var->IsContextSlot());
Register context = cp;
Register next = x10;
Register temp = x11;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
__ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
__ Cbnz(temp, slow);
}
__ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
// Check that last extension is NULL.
__ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
__ Cbnz(temp, slow);
// This function is used only for loads, not stores, so it's safe to
// return a cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
return ContextMemOperand(context, var->index());
}
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow,
Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
// perform a runtime call for all variables in the scope
// containing the eval.
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
__ B(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == LET || local->mode() == CONST ||
local->mode() == CONST_LEGACY) {
__ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
if (local->mode() == CONST_LEGACY) {
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
} else { // LET || CONST
__ Mov(x0, Operand(var->name()));
__ Push(x0);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
__ B(done);
}
}
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Record position before possible IC call.
SetSourcePosition(proxy->position());
Variable* var = proxy->var();
// Three cases: global variables, lookup variables, and all other types of
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "Global variable");
__ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
__ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(proxy->VariableFeedbackSlot()));
}
CallLoadIC(CONTEXTUAL);
context()->Plug(x0);
break;
}
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
// always looked up dynamically, i.e. in that case
//     var->location() == LOOKUP
// always holds.
DCHECK(var->scope() != NULL);
// Check if the binding really needs an initialization check. The check
// can be skipped in the following situation: we have a LET or CONST
// binding in harmony mode, both the Variable and the VariableProxy have
// the same declaration scope (i.e. they are both in global code, in the
// same function or in the same eval code) and the VariableProxy is in
// the source physically located after the initializer of the variable.
//
// We cannot skip any initialization checks for CONST in non-harmony
// mode because const variables may be declared but never initialized:
// if (false) { const x; }; var y = x;
//
// The condition on the declaration scopes is a conservative check for
// nested functions that access a binding and are called before the
// binding is initialized:
// function() { f(); let x = 1; function f() { x = 2; } }
//
bool skip_init_check;
if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
skip_init_check = false;
} else {
// Check that we always have valid source position.
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
DCHECK(proxy->position() != RelocInfo::kNoPosition);
skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
if (!skip_init_check) {
// Let and const need a read barrier.
GetVar(x0, var);
Label done;
__ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ Mov(x0, Operand(var->name()));
__ Push(x0);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ Bind(&done);
} else {
// Uninitialized const bindings outside of harmony mode are unholed.
DCHECK(var->mode() == CONST_LEGACY);
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
__ Bind(&done);
}
context()->Plug(x0);
break;
}
}
context()->Plug(var);
break;
}
case Variable::LOOKUP: {
Label done, slow;
// Generate code for loading from variables potentially shadowed by
// eval-introduced variables.
EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
__ Bind(&slow);
Comment cmnt(masm_, "Lookup variable");
__ Mov(x1, Operand(var->name()));
__ Push(cp, x1); // Context and name.
__ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ Bind(&done);
context()->Plug(x0);
break;
}
}
}
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label materialized;
// Registers will be used as follows:
// x5 = materialized value (RegExp literal)
// x4 = JS function, literals array
// x3 = literal index
// x2 = RegExp pattern
// x1 = RegExp flags
// x0 = RegExp literal clone
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
__ Ldr(x5, FieldMemOperand(x4, literal_offset));
__ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
// Create regexp literal using runtime function.
// Result will be in x0.
__ Mov(x3, Smi::FromInt(expr->literal_index()));
__ Mov(x2, Operand(expr->pattern()));
__ Mov(x1, Operand(expr->flags()));
__ Push(x4, x3, x2, x1);
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ Mov(x5, x0);
__ Bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
__ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT);
__ B(&allocated);
__ Bind(&runtime_allocate);
__ Mov(x10, Smi::FromInt(size));
__ Push(x5, x10);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
__ Pop(x5);
__ Bind(&allocated);
// After this, registers are used as follows:
// x0: Newly allocated regexp.
// x5: Materialized regexp.
// x10, x11, x12: temps.
__ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize);
context()->Plug(x0);
}
void FullCodeGenerator::EmitAccessor(Expression* expression) {
if (expression == NULL) {
__ LoadRoot(x10, Heap::kNullValueRootIndex);
__ Push(x10);
} else {
VisitForStackValue(expression);
}
}
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
__ Mov(x2, Smi::FromInt(expr->literal_index()));
__ Mov(x1, Operand(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
flags |= expr->has_function()
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ Mov(x0, Smi::FromInt(flags));
int properties_count = constant_properties->length() / 2;
const int max_cloned_properties =
FastCloneShallowObjectStub::kMaximumClonedProperties;
if (expr->may_store_doubles() || expr->depth() > 1 ||
masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > max_cloned_properties) {
__ Push(x3, x2, x1, x0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(isolate(), properties_count);
__ CallStub(&stub);
}
PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
// result_saved is false the result is in x0.
bool result_saved = false;
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
expr->CalculateEmitStore(zone());
AccessorTable accessor_table(zone());
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key();
Expression* value = property->value();
if (!result_saved) {
__ Push(x0); // Save result on stack
result_saved = true;
}
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
DCHECK(StoreDescriptor::ValueRegister().is(x0));
__ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
__ Mov(StoreDescriptor::ReceiverRegister(), x0);
__ Mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ Peek(StoreDescriptor::ValueRegister(), 0);
CallStoreIC();
}
} else {
VisitForEffect(value);
}
break;
}
if (property->emit_store()) {
// Duplicate receiver on stack.
__ Peek(x0, 0);
__ Push(x0);
VisitForStackValue(key);
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
__ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode (sloppy).
__ Push(x0);
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
VisitForEffect(key);
VisitForEffect(value);
}
break;
case ObjectLiteral::Property::PROTOTYPE:
if (property->emit_store()) {
// Duplicate receiver on stack.
__ Peek(x0, 0);
__ Push(x0);
VisitForStackValue(value);
__ CallRuntime(Runtime::kInternalSetPrototype, 2);
} else {
VisitForEffect(value);
}
break;
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
case ObjectLiteral::Property::SETTER:
accessor_table.lookup(key)->second->setter = value;
break;
}
}
// Emit code to define accessors, using only a single call to the runtime for
// each pair of corresponding getters and setters.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
__ Peek(x10, 0); // Duplicate receiver.
__ Push(x10);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitSetHomeObjectIfNeeded(it->second->getter, 2);
EmitAccessor(it->second->setter);
EmitSetHomeObjectIfNeeded(it->second->setter, 3);
__ Mov(x10, Smi::FromInt(NONE));
__ Push(x10);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
if (expr->has_function()) {
DCHECK(result_saved);
__ Peek(x0, 0);
__ Push(x0);
__ CallRuntime(Runtime::kToFastProperties, 1);
}
if (result_saved) {
context()->PlugTOS();
} else {
context()->Plug(x0);
}
}
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
expr->BuildConstantElements(isolate());
int flags = (expr->depth() == 1) ? ArrayLiteral::kShallowElements
: ArrayLiteral::kNoFlags;
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
Handle<FixedArray> constant_elements = expr->constant_elements();
DCHECK_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
// If the only customer of allocation sites is transitioning, then
// we can turn it off if we don't have anywhere else to transition to.
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
__ Mov(x2, Smi::FromInt(expr->literal_index()));
__ Mov(x1, Operand(constant_elements));
if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
__ Mov(x0, Smi::FromInt(flags));
__ Push(x3, x2, x1, x0);
__ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
__ CallStub(&stub);
}
bool result_saved = false; // Is the result saved to the stack?
// Emit code to evaluate all the non-constant subexpressions and to store
// them into the newly cloned array.
for (int i = 0; i < length; i++) {
Expression* subexpr = subexprs->at(i);
// If the subexpression is a literal or a simple materialized literal it
// is already set in the cloned array.
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
__ Mov(x1, Smi::FromInt(expr->literal_index()));
__ Push(x0, x1);
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
__ Peek(x6, kPointerSize); // Copy of array literal.
__ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset));
__ Str(result_register(), FieldMemOperand(x1, offset));
// Update the write barrier for the array store.
__ RecordWriteField(x1, offset, result_register(), x10,
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
} else {
__ Mov(x3, Smi::FromInt(i));
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
if (result_saved) {
__ Drop(1); // literal index
context()->PlugTOS();
} else {
context()->Plug(x0);
}
}
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
Comment cmnt(masm_, "[ Assignment");
Property* property = expr->target()->AsProperty();
LhsKind assign_type = GetAssignType(property);
// Evaluate LHS expression.
switch (assign_type) {
case VARIABLE:
// Nothing to do here.
break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
} else {
VisitForStackValue(property->obj());
}
break;
case NAMED_SUPER_PROPERTY:
VisitForStackValue(property->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(property->obj()->AsSuperReference());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch = x10;
__ Peek(scratch, kPointerSize);
__ Push(scratch, result_register());
}
break;
case KEYED_SUPER_PROPERTY:
VisitForStackValue(property->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(property->obj()->AsSuperReference());
__ Push(result_register());
VisitForAccumulatorValue(property->key());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch1 = x10;
const Register scratch2 = x11;
__ Peek(scratch1, 2 * kPointerSize);
__ Peek(scratch2, kPointerSize);
__ Push(scratch1, scratch2, result_register());
}
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
__ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
__ Peek(LoadDescriptor::NameRegister(), 0);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
}
break;
}
// For compound assignments we need another deoptimization point after the
// variable/property load.
if (expr->is_compound()) {
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
}
}
Token::Value op = expr->binary_op();
__ Push(x0); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
: NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
EmitInlineSmiBinaryOp(expr->binary_operation(),
op,
mode,
expr->target(),
expr->value());
} else {
EmitBinaryOp(expr->binary_operation(), op, mode);
}
// Deoptimization point in case the binary operation may have side effects.
PrepareForBailout(expr->binary_operation(), TOS_REG);
} else {
VisitForAccumulatorValue(expr->value());
}
// Record source position before possible IC call.
SetSourcePosition(expr->position());
// Store the value.
switch (assign_type) {
case VARIABLE:
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyStore(property);
context()->Plug(x0);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyStore(property);
context()->Plug(x0);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
}
}
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!prop->IsSuperAccess());
__ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(prop->PropertyFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL);
} else {
CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
}
}
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ Push(key->value());
__ CallRuntime(Runtime::kLoadFromSuper, 3);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in x0 and x1.
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(prop->PropertyFeedbackSlot()));
CallIC(ic);
} else {
CallIC(ic, prop->PropertyFeedbackId());
}
}
void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object, key.
SetSourcePosition(prop->position());
__ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
}
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode,
Expression* left_expr,
Expression* right_expr) {
Label done, both_smis, stub_call;
// Get the arguments.
Register left = x1;
Register right = x0;
Register result = x0;
__ Pop(left);
// Perform combined smi check on both operands.
__ Orr(x10, left, right);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(x10, &both_smis);
__ Bind(&stub_call);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
{
Assembler::BlockPoolsScope scope(masm_);
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
__ B(&done);
__ Bind(&both_smis);
// Smi case. Both operands are known to be smis here, so each operation is
// performed directly on the tagged values, falling back to the generic
// stub (via stub_call) on overflow or any other case the inline code
// cannot handle.
//
// The set of operations that need to be supported here is controlled by
// FullCodeGenerator::ShouldInlineSmiCase().
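// As a reminder of the arm64 smi encoding assumed below: a smi keeps its
// 32-bit payload in the upper word of the register, i.e. Smi(v) == v << 32
// with kSmiShift == 32 and a zero tag. For example (illustrative):
//   Smi(5)  == 0x0000000500000000
//   Smi(-1) == 0xffffffff00000000
// Hence Ubfx(right, right, kSmiShift, 5) both untags the shift count and
// reduces it modulo 32, and Bic(..., kSmiShiftMask) clears the low word to
// retag a shifted result.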
switch (op) {
case Token::SAR:
__ Ubfx(right, right, kSmiShift, 5);
__ Asr(result, left, right);
__ Bic(result, result, kSmiShiftMask);
break;
case Token::SHL:
__ Ubfx(right, right, kSmiShift, 5);
__ Lsl(result, left, right);
break;
case Token::SHR:
// If `left >>> right` >= 0x80000000, the result is not representable in a
// signed 32-bit smi.
__ Ubfx(right, right, kSmiShift, 5);
__ Lsr(x10, left, right);
__ Tbnz(x10, kXSignBit, &stub_call);
__ Bic(result, x10, kSmiShiftMask);
break;
case Token::ADD:
__ Adds(x10, left, right);
__ B(vs, &stub_call);
__ Mov(result, x10);
break;
case Token::SUB:
__ Subs(x10, left, right);
__ B(vs, &stub_call);
__ Mov(result, x10);
break;
case Token::MUL: {
Label not_minus_zero, done;
STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == (kXRegSizeInBits / 2));
STATIC_ASSERT(kSmiTag == 0);
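// With both operands tagged as (payload << 32), the 128-bit product is
// (left_payload * right_payload) << 64, so Smulh (the high 64 bits of the
// product) yields exactly the untagged 64-bit product of the payloads.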
__ Smulh(x10, left, right);
__ Cbnz(x10, &not_minus_zero);
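// The product is zero: the mathematical result is -0 (not representable
// as a smi) exactly when the operands' signs differ.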
__ Eor(x11, left, right);
__ Tbnz(x11, kXSignBit, &stub_call);
__ Mov(result, x10);
__ B(&done);
__ Bind(&not_minus_zero);
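// Cls counts how many bits below the sign bit match it; the product only
// fits a 32-bit payload (and can be safely retagged by SmiTag) if at
// least kXRegSizeInBits - kSmiShift (i.e. 32) of them do.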
__ Cls(x11, x10);
__ Cmp(x11, kXRegSizeInBits - kSmiShift);
__ B(lt, &stub_call);
__ SmiTag(result, x10);
__ Bind(&done);
break;
}
case Token::BIT_OR:
__ Orr(result, left, right);
break;
case Token::BIT_AND:
__ And(result, left, right);
break;
case Token::BIT_XOR:
__ Eor(result, left, right);
break;
default:
UNREACHABLE();
}
__ Bind(&done);
context()->Plug(x0);
}
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
__ Pop(x1);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{
Assembler::BlockPoolsScope scope(masm_);
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
context()->Plug(x0);
}
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in x0.
DCHECK(lit != NULL);
__ Push(x0);
// No access check is needed here since the constructor is created by the
// class literal.
Register scratch = x1;
__ Ldr(scratch,
FieldMemOperand(x0, JSFunction::kPrototypeOrInitialMapOffset));
__ Push(scratch);
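// The stack now holds the constructor (below) and the prototype (on top).
// Each iteration below pushes (receiver, key, value) so that the runtime
// functions called in the switch all take three arguments.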
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
DCHECK(key != NULL);
if (property->is_static()) {
__ Peek(scratch, kPointerSize); // constructor
} else {
__ Peek(scratch, 0); // prototype
}
__ Push(scratch);
VisitForStackValue(key);
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::PROTOTYPE:
__ CallRuntime(Runtime::kDefineClassMethod, 3);
break;
case ObjectLiteral::Property::GETTER:
__ CallRuntime(Runtime::kDefineClassGetter, 3);
break;
case ObjectLiteral::Property::SETTER:
__ CallRuntime(Runtime::kDefineClassSetter, 3);
break;
default:
UNREACHABLE();
}
}
// prototype
__ CallRuntime(Runtime::kToFastProperties, 1);
// constructor
__ CallRuntime(Runtime::kToFastProperties, 1);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
DCHECK(expr->IsValidReferenceExpression());
Property* prop = expr->AsProperty();
LhsKind assign_type = GetAssignType(prop);
switch (assign_type) {
case VARIABLE: {
Variable* var = expr->AsVariableProxy()->var();
EffectContext context(this);
EmitVariableAssignment(var, Token::ASSIGN);
break;
}
case NAMED_PROPERTY: {
__ Push(x0); // Preserve value.
VisitForAccumulatorValue(prop->obj());
// TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
// this copy.
__ Mov(StoreDescriptor::ReceiverRegister(), x0);
__ Pop(StoreDescriptor::ValueRegister()); // Restore value.
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
CallStoreIC();
break;
}
case NAMED_SUPER_PROPERTY: {
__ Push(x0);
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
// stack: value, this; x0: home_object
Register scratch = x10;
Register scratch2 = x11;
__ Mov(scratch, result_register()); // home_object
__ Peek(x0, kPointerSize); // value
__ Peek(scratch2, 0); // this
__ Poke(scratch2, kPointerSize); // this
__ Poke(scratch, 0); // home_object
// stack: this, home_object; x0: value
EmitNamedSuperPropertyStore(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
__ Push(x0);
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
__ Push(result_register());
VisitForAccumulatorValue(prop->key());
Register scratch = x10;
Register scratch2 = x11;
__ Peek(scratch2, 2 * kPointerSize); // value
// stack: value, this, home_object; x0: key, x11: value
__ Peek(scratch, kPointerSize); // this
__ Poke(scratch, 2 * kPointerSize);
__ Peek(scratch, 0); // home_object
__ Poke(scratch, kPointerSize);
__ Poke(x0, 0);
__ Move(x0, scratch2);
// stack: this, home_object, key; x0: value.
EmitKeyedSuperPropertyStore(prop);
break;
}
case KEYED_PROPERTY: {
__ Push(x0); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ Mov(StoreDescriptor::NameRegister(), x0);
__ Pop(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::ValueRegister());
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic);
break;
}
}
context()->Plug(x0);
}
void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
Variable* var, MemOperand location) {
__ Str(result_register(), location);
if (var->IsContextSlot()) {
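// Context slots live on the heap, so storing a pointer into one must be
// recorded for the GC; stack slots are scanned directly and need no
// barrier.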
// RecordWrite may destroy all its register arguments.
__ Mov(x10, result_register());
int offset = Context::SlotOffset(var->index());
__ RecordWriteContextSlot(
x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
}
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
__ Mov(x1, Operand(var->name()));
__ Push(x0, cp, x1);
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackLocal() || var->IsContextSlot());
Label skip;
MemOperand location = VarOperand(var, x1);
__ Ldr(x10, location);
__ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ Bind(&skip);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
MemOperand location = VarOperand(var, x1);
__ Ldr(x10, location);
__ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
__ Mov(x10, Operand(var->name()));
__ Push(x10);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
// Perform the assignment.
__ Bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
__ Mov(x11, Operand(var->name()));
__ Mov(x10, Smi::FromInt(strict_mode()));
// jssp[0] : mode.
// jssp[8] : name.
// jssp[16] : context.
// jssp[24] : value.
__ Push(x0, cp, x11, x10);
__ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, x1);
if (FLAG_debug_code && op == Token::INIT_LET) {
__ Ldr(x10, location);
__ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
} else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
__ CallRuntime(Runtime::kThrowConstAssignError, 0);
}
}
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
DCHECK(prop != NULL);
DCHECK(prop->key()->IsLiteral());
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
__ Pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
}
void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
// Assignment to named property of super.
// x0 : value
// stack : receiver ('this'), home_object
DCHECK(prop != NULL);
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
__ Push(key->value());
__ Push(x0);
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy),
4);
}
void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// Assignment to keyed property of super.
// x0 : value
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
__ Push(x0);
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
: Runtime::kStoreKeyedToSuper_Sloppy),
4);
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
// Assignment to a property, using a keyed store IC.
// Record source code position before IC call.
SetSourcePosition(expr->position());
// TODO(all): Could we pass this in registers rather than on the stack?
__ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
DCHECK(StoreDescriptor::ValueRegister().is(x0));
Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(x0);
}
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
if (key->IsPropertyName()) {
if (!expr->IsSuperAccess()) {
VisitForAccumulatorValue(expr->obj());
__ Move(LoadDescriptor::ReceiverRegister(), x0);
EmitNamedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(expr->obj()->AsSuperReference());
__ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(x0);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
__ Move(LoadDescriptor::NameRegister(), x0);
__ Pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(expr->obj()->AsSuperReference());
__ Push(result_register());
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
context()->Plug(x0);
}
}
void FullCodeGenerator::CallIC(Handle<Code> code,
TypeFeedbackId ast_id) {
ic_total_count_++;
// All calls must have a predictable size in full-codegen code to ensure that
// the debugger can patch them correctly.
__ Call(code, RelocInfo::CODE_TARGET, ast_id);
}
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
CallICState::CallType call_type =
callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
// Get the target function.
if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
DCHECK(!callee->AsProperty()->IsSuperAccess());
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
__ Pop(x10);
__ Push(x0, x10);
}
EmitCall(expr, call_type);
}
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
const Register scratch = x10;
SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
EmitLoadHomeObject(super_ref);
__ Push(x0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(x0);
__ Peek(scratch, kPointerSize);
__ Push(x0, scratch);
__ Push(key->value());
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
__ CallRuntime(Runtime::kLoadFromSuper, 3);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
// Stack here:
// - target function
// - this (receiver)
EmitCall(expr, CallICState::METHOD);
}
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
Expression* callee = expr->expression();
// Load the function from the receiver.
DCHECK(callee->IsProperty());
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
__ Move(LoadDescriptor::NameRegister(), x0);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
__ Pop(x10);
__ Push(x0, x10);
EmitCall(expr, CallICState::METHOD);
}
void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
SetSourcePosition(prop->position());
// Load the function from the receiver.
const Register scratch = x10;
SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
EmitLoadHomeObject(super_ref);
__ Push(x0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(x0);
__ Peek(scratch, kPointerSize);
__ Push(x0, scratch);
VisitForStackValue(prop->key());
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
// - home_object
// - key
__ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
// Stack here:
// - target function
// - this (receiver)
EmitCall(expr, CallICState::METHOD);
}
void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
}
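// The function and the receiver were pushed by our caller, so for a call
// f(a, b) (illustrative) the stack is now:
//   jssp[0]  : b
//   jssp[8]  : a
//   jssp[16] : receiver
//   jssp[24] : f  <- loaded into x1 below.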
// Record source position of the IC call.
SetSourcePosition(expr->position());
Handle<Code> ic = CallIC::initialize_stub(
isolate(), arg_count, call_type);
__ Mov(x3, SmiFromSlot(expr->CallFeedbackSlot()));
__ Peek(x1, (arg_count + 1) * kXRegSize);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
RecordJSReturnSite(expr);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, x0);
}
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
// Prepare to push a copy of the first argument or undefined if it doesn't
// exist.
if (arg_count > 0) {
__ Peek(x9, arg_count * kXRegSize);
} else {
__ LoadRoot(x9, Heap::kUndefinedValueRootIndex);
}
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// Prepare to push the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
__ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
// Prepare to push the language mode.
__ Mov(x12, Smi::FromInt(strict_mode()));
// Prepare to push the start position of the scope the call resides in.
__ Mov(x13, Smi::FromInt(scope()->start_position()));
// Push.
__ Push(x9, x10, x11, x12, x13);
// Do the runtime call. Together with the copy of the function pushed by
// the caller, this makes six arguments.
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
DCHECK(super_ref != NULL);
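// The super constructor is the [[Prototype]] of the active (derived)
// constructor, so load the current function and ask the runtime for its
// prototype.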
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(x0);
__ CallRuntime(Runtime::kGetPrototype, 1);
}
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
// through this function. Avoid early returns.
expr->return_is_recorded_ = false;
#endif
Comment cmnt(masm_, "[ Call");
Expression* callee = expr->expression();
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
// In a call to eval, we first call Runtime::kResolvePossiblyDirectEval
// to resolve the function we need to call and the receiver of the call.
// Then we call the resolved function with the given arguments.
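// For example (illustrative), a direct call eval("x + 1") must be
// resolved at runtime because "eval" may or may not be bound to the
// global eval function at this point.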
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
{
PreservePositionScope pos_scope(masm()->positions_recorder());
VisitForStackValue(callee);
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
__ Push(x10); // Reserved receiver slot.
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
// Push a copy of the function (found below the arguments) and
// resolve eval.
__ Peek(x10, (arg_count + 1) * kPointerSize);
__ Push(x10);
EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in x0 (function) and
// x1 (receiver). Touch up the stack with the right values.
__ PokePair(x1, x0, arg_count * kPointerSize);
PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
}
// Record source position for debugger.
SetSourcePosition(expr->position());
// Call the evaluated function.
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ Peek(x1, (arg_count + 1) * kXRegSize);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, x0);
} else if (call_type == Call::GLOBAL_CALL) {
EmitCallWithLoadIC(expr);
} else if (call_type == Call::LOOKUP_SLOT_CALL) {
// Call to a lookup slot (dynamically introduced variable).
VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
{ PreservePositionScope scope(masm()->positions_recorder());
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
}
__ Bind(&slow);
// Call the runtime to find the function to call (returned in x0)
// and the object holding it (returned in x1).
__ Mov(x10, Operand(proxy->name()));
__ Push(context_register(), x10);
__ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ Push(x0, x1); // Receiver, function.
PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the
// function and receiver and have the slow path jump around this
// code.
if (done.is_linked()) {
Label call;
__ B(&call);
__ Bind(&done);
// Push function.
// The receiver is implicitly the global receiver. Indicate this
// by passing undefined to the call function stub.
__ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
__ Push(x0, x1);
__ Bind(&call);
}
// The receiver is either the global receiver or an object found
// by LoadContextSlot.
EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty();
bool is_named_call = property->key()->IsPropertyName();
if (property->IsSuperAccess()) {
if (is_named_call) {
EmitSuperCallWithLoadIC(expr);
} else {
EmitKeyedSuperCallWithLoadIC(expr);
}
} else {
{
PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj());
}
if (is_named_call) {
EmitCallWithLoadIC(expr);
} else {
EmitKeyedCallWithLoadIC(expr, property->key());
}
}
} else if (call_type == Call::SUPER_CALL) {
SuperReference* super_ref = callee->AsSuperReference();
EmitLoadSuperConstructor(super_ref);
__ Push(result_register());
VisitForStackValue(super_ref->this_var());
EmitCall(expr, CallICState::METHOD);
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
__ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
__ Push(x1);
// Emit function call.
EmitCall(expr);
}
#ifdef DEBUG
// RecordJSReturnSite should have been called.
DCHECK(expr->return_is_recorded_);
#endif
}
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
// expression in new calls must be evaluated before the
// arguments.
// Push the constructor on the stack. If it is not a function it is used
// as the receiver for CALL_NON_FUNCTION; otherwise the value on the stack
// is ignored.
if (expr->expression()->IsSuperReference()) {
EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
__ Push(result_register());
} else {
VisitForStackValue(expr->expression());
}
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
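// For new C(a, b) (illustrative) the stack now holds:
//   jssp[0]  : b
//   jssp[8]  : a
//   jssp[16] : C  <- loaded into x1 below.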
// Call the construct call builtin that handles allocation and
// constructor invocation.
SetSourcePosition(expr->position());
// Load function and argument count into x1 and x0.
__ Mov(x0, arg_count);
__ Peek(x1, arg_count * kXRegSize);
// Record call targets in unoptimized code.
if (FLAG_pretenuring_call_new) {
EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
expr->CallNewFeedbackSlot().ToInt() + 1);
}
__ LoadObject(x2, FeedbackVector());
__ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(x0);
}