| // Copyright 2012 the V8 project authors. All rights reserved. |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions are |
| // met: |
| // |
| // * Redistributions of source code must retain the above copyright |
| // notice, this list of conditions and the following disclaimer. |
| // * Redistributions in binary form must reproduce the above |
| // copyright notice, this list of conditions and the following |
| // disclaimer in the documentation and/or other materials provided |
| // with the distribution. |
| // * Neither the name of Google Inc. nor the names of its |
| // contributors may be used to endorse or promote products derived |
| // from this software without specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| #include "v8.h" |
| |
| #if V8_TARGET_ARCH_ARM |
| |
| #include "bootstrapper.h" |
| #include "code-stubs.h" |
| #include "regexp-macro-assembler.h" |
| #include "stub-cache.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| |
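| // The InitializeInterfaceDescriptor methods below all follow the same |
| // pattern: list the ARM registers that carry the stub's parameters, record |
| // how many there are, and name the runtime function or IC miss routine used |
| // as the deoptimization handler (NULL when the stub has none). |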
| void ToNumberStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r0 }; |
| descriptor->register_param_count_ = 1; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = NULL; |
| } |
| |
| |
| void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r3, r2, r1 }; |
| descriptor->register_param_count_ = 3; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = |
| Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry; |
| } |
| |
| |
| void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r3, r2, r1, r0 }; |
| descriptor->register_param_count_ = 4; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = |
| Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; |
| } |
| |
| |
| void CreateAllocationSiteStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r2 }; |
| descriptor->register_param_count_ = 1; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = NULL; |
| } |
| |
| |
| void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r1, r0 }; |
| descriptor->register_param_count_ = 2; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = |
| FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); |
| } |
| |
| |
| void LoadFieldStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r0 }; |
| descriptor->register_param_count_ = 1; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = NULL; |
| } |
| |
| |
| void KeyedLoadFieldStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r1 }; |
| descriptor->register_param_count_ = 1; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = NULL; |
| } |
| |
| |
| void KeyedStoreFastElementStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r2, r1, r0 }; |
| descriptor->register_param_count_ = 3; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = |
| FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure); |
| } |
| |
| |
| void TransitionElementsKindStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r0, r1 }; |
| descriptor->register_param_count_ = 2; |
| descriptor->register_params_ = registers; |
| Address entry = |
| Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; |
| descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry); |
| } |
| |
| |
| void CompareNilICStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r0 }; |
| descriptor->register_param_count_ = 1; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = |
| FUNCTION_ADDR(CompareNilIC_Miss); |
| descriptor->SetMissHandler( |
| ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); |
| } |
| |
| |
| static void InitializeArrayConstructorDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor, |
| int constant_stack_parameter_count) { |
| // register state |
| // r0 -- number of arguments |
| // r1 -- function |
| // r2 -- type info cell with elements kind |
| static Register registers[] = { r1, r2 }; |
| descriptor->register_param_count_ = 2; |
| if (constant_stack_parameter_count != 0) { |
| // The stack parameter count (constructor pointer plus single argument) is in r0. |
| descriptor->stack_parameter_count_ = &r0; |
| } |
| descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; |
| descriptor->register_params_ = registers; |
| descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; |
| descriptor->deoptimization_handler_ = |
| Runtime::FunctionForId(Runtime::kArrayConstructor)->entry; |
| } |
| |
| |
| static void InitializeInternalArrayConstructorDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor, |
| int constant_stack_parameter_count) { |
| // register state |
| // r0 -- number of arguments |
| // r1 -- constructor function |
| static Register registers[] = { r1 }; |
| descriptor->register_param_count_ = 1; |
| |
| if (constant_stack_parameter_count != 0) { |
| // The stack parameter count (constructor pointer plus single argument) is in r0. |
| descriptor->stack_parameter_count_ = &r0; |
| } |
| descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; |
| descriptor->register_params_ = registers; |
| descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; |
| descriptor->deoptimization_handler_ = |
| Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry; |
| } |
| |
| |
| void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| InitializeArrayConstructorDescriptor(isolate, descriptor, 0); |
| } |
| |
| |
| void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| InitializeArrayConstructorDescriptor(isolate, descriptor, 1); |
| } |
| |
| |
| void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| InitializeArrayConstructorDescriptor(isolate, descriptor, -1); |
| } |
| |
| |
| void ToBooleanStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r0 }; |
| descriptor->register_param_count_ = 1; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = |
| FUNCTION_ADDR(ToBooleanIC_Miss); |
| descriptor->SetMissHandler( |
| ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); |
| } |
| |
| |
| void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0); |
| } |
| |
| |
| void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1); |
| } |
| |
| |
| void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1); |
| } |
| |
| |
| void StoreGlobalStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r1, r2, r0 }; |
| descriptor->register_param_count_ = 3; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = |
| FUNCTION_ADDR(StoreIC_MissFromStubFailure); |
| } |
| |
| |
| void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( |
| Isolate* isolate, |
| CodeStubInterfaceDescriptor* descriptor) { |
| static Register registers[] = { r0, r3, r1, r2 }; |
| descriptor->register_param_count_ = 4; |
| descriptor->register_params_ = registers; |
| descriptor->deoptimization_handler_ = |
| FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); |
| } |
| |
| |
| #define __ ACCESS_MASM(masm) |
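| // In the stub bodies below, __ expands to ACCESS_MASM(masm), so each |
| // __ op(...) line emits ARM code through the MacroAssembler. |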
| |
| |
| static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| Label* slow, |
| Condition cond); |
| static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| Register lhs, |
| Register rhs, |
| Label* lhs_not_nan, |
| Label* slow, |
| bool strict); |
| static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| Register lhs, |
| Register rhs); |
| |
| |
| void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { |
| // Update the static counter each time a new code stub is generated. |
| Isolate* isolate = masm->isolate(); |
| isolate->counters()->code_stubs()->Increment(); |
| |
| CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); |
| int param_count = descriptor->register_param_count_; |
| { |
| // Call the runtime system in a fresh internal frame. |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| ASSERT(descriptor->register_param_count_ == 0 || |
| r0.is(descriptor->register_params_[param_count - 1])); |
| // Push arguments |
| for (int i = 0; i < param_count; ++i) { |
| __ push(descriptor->register_params_[i]); |
| } |
| ExternalReference miss = descriptor->miss_handler(); |
| __ CallExternalReference(miss, descriptor->register_param_count_); |
| } |
| |
| __ Ret(); |
| } |
| |
| |
| void FastNewClosureStub::Generate(MacroAssembler* masm) { |
| // Create a new closure from the given function info in new |
| // space. Set the context to the current context in cp. |
| Counters* counters = masm->isolate()->counters(); |
| |
| Label gc; |
| |
| // Pop the function info from the stack. |
| __ pop(r3); |
| |
| // Attempt to allocate new JSFunction in new space. |
| __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT); |
| |
| __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7); |
| |
| int map_index = Context::FunctionMapIndex(language_mode_, is_generator_); |
| |
| // Compute the function map in the current native context and set that |
| // as the map of the allocated object. |
| __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset)); |
| __ ldr(r5, MemOperand(r2, Context::SlotOffset(map_index))); |
| __ str(r5, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| |
| // Initialize the rest of the function. We don't have to update the |
| // write barrier because the allocated object is in new space. |
| __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); |
| __ LoadRoot(r5, Heap::kTheHoleValueRootIndex); |
| __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); |
| __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); |
| __ str(r5, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); |
| __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); |
| __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); |
| __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); |
| |
| // Initialize the code pointer in the function to be the one |
| // found in the shared function info object. |
| // But first check if there is an optimized version for our context. |
| Label check_optimized; |
| Label install_unoptimized; |
| if (FLAG_cache_optimized_code) { |
| __ ldr(r1, |
| FieldMemOperand(r3, SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| __ tst(r1, r1); |
| __ b(ne, &check_optimized); |
| } |
| __ bind(&install_unoptimized); |
| __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); |
| __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); |
| __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); |
| __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); |
| |
| // Return result. The argument function info has been popped already. |
| __ Ret(); |
| |
| __ bind(&check_optimized); |
| |
| __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, r6, r7); |
| |
| // r2 holds native context, r1 points to fixed array of 3-element entries |
| // (native context, optimized code, literals). |
| // The optimized code map must never be empty, so check the first elements. |
| Label install_optimized; |
| // Speculatively move code object into r4. |
| __ ldr(r4, FieldMemOperand(r1, SharedFunctionInfo::kFirstCodeSlot)); |
| __ ldr(r5, FieldMemOperand(r1, SharedFunctionInfo::kFirstContextSlot)); |
| __ cmp(r2, r5); |
| __ b(eq, &install_optimized); |
| |
| // Iterate through the rest of the map backwards. r4 holds an index as a Smi. |
| Label loop; |
| __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset)); |
| __ bind(&loop); |
| // Do not double check first entry. |
| __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kSecondEntryIndex))); |
| __ b(eq, &install_unoptimized); |
| __ sub(r4, r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); |
| __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); |
| __ ldr(r5, MemOperand(r5)); |
| __ cmp(r2, r5); |
| __ b(ne, &loop); |
| // Hit: fetch the optimized code. |
| __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| __ add(r5, r5, Operand::PointerOffsetFromSmiKey(r4)); |
| __ add(r5, r5, Operand(kPointerSize)); |
| __ ldr(r4, MemOperand(r5)); |
| |
| __ bind(&install_optimized); |
| __ IncrementCounter(counters->fast_new_closure_install_optimized(), |
| 1, r6, r7); |
| |
| // TODO(fschneider): Idea: store proper code pointers in the map and either |
| // unmangle them on marking or do nothing as the whole map is discarded on |
| // major GC anyway. |
| __ add(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| __ str(r4, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); |
| |
| // Now link a function into a list of optimized functions. |
| __ ldr(r4, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| |
| __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); |
| // No need for a write barrier as the JSFunction (r0) is in new space. |
| |
| __ str(r0, ContextOperand(r2, Context::OPTIMIZED_FUNCTIONS_LIST)); |
| // Copy the JSFunction (r0) into r4 before issuing the write barrier, as |
| // the barrier clobbers all the registers passed to it. |
| __ mov(r4, r0); |
| __ RecordWriteContextSlot( |
| r2, |
| Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST), |
| r4, |
| r1, |
| kLRHasNotBeenSaved, |
| kDontSaveFPRegs); |
| |
| // Return result. The argument function info has been popped already. |
| __ Ret(); |
| |
| // Create a new closure through the slower runtime call. |
| __ bind(&gc); |
| __ LoadRoot(r4, Heap::kFalseValueRootIndex); |
| __ Push(cp, r3, r4); |
| __ TailCallRuntime(Runtime::kNewClosure, 3, 1); |
| } |
| |
| |
| void FastNewContextStub::Generate(MacroAssembler* masm) { |
| // Try to allocate the context in new space. |
| Label gc; |
| int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
| |
| // Attempt to allocate the context in new space. |
| __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT); |
| |
| // Load the function from the stack. |
| __ ldr(r3, MemOperand(sp, 0)); |
| |
| // Set up the object header. |
| __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex); |
| __ mov(r2, Operand(Smi::FromInt(length))); |
| __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); |
| __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| |
| // Set up the fixed slots, copy the global object from the previous context. |
| __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| __ mov(r1, Operand(Smi::FromInt(0))); |
| __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); |
| __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
| __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); |
| __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| |
| // Initialize the rest of the slots to undefined. |
| __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); |
| for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { |
| __ str(r1, MemOperand(r0, Context::SlotOffset(i))); |
| } |
| |
| // Remove the on-stack argument and return. |
| __ mov(cp, r0); |
| __ pop(); |
| __ Ret(); |
| |
| // Need to collect. Call into runtime system. |
| __ bind(&gc); |
| __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1); |
| } |
| |
| |
| void FastNewBlockContextStub::Generate(MacroAssembler* masm) { |
| // Stack layout on entry: |
| // |
| // [sp]: function. |
| // [sp + kPointerSize]: serialized scope info |
| |
| // Try to allocate the context in new space. |
| Label gc; |
| int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
| __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT); |
| |
| // Load the function from the stack. |
| __ ldr(r3, MemOperand(sp, 0)); |
| |
| // Load the serialized scope info from the stack. |
| __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); |
| |
| // Set up the object header. |
| __ LoadRoot(r2, Heap::kBlockContextMapRootIndex); |
| __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| __ mov(r2, Operand(Smi::FromInt(length))); |
| __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); |
| |
| // If this block context is nested in the native context we get a smi |
| // sentinel instead of a function. The block context should get the |
| // canonical empty function of the native context as its closure which |
| // we still have to look up. |
| Label after_sentinel; |
| __ JumpIfNotSmi(r3, &after_sentinel); |
| if (FLAG_debug_code) { |
| __ cmp(r3, Operand::Zero()); |
| __ Assert(eq, kExpected0AsASmiSentinel); |
| } |
| __ ldr(r3, GlobalObjectOperand()); |
| __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset)); |
| __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); |
| __ bind(&after_sentinel); |
| |
| // Set up the fixed slots, copy the global object from the previous context. |
| __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
| __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); |
| __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); |
| __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); |
| __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX)); |
| |
| // Initialize the rest of the slots to the hole value. |
| __ LoadRoot(r1, Heap::kTheHoleValueRootIndex); |
| for (int i = 0; i < slots_; i++) { |
| __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS)); |
| } |
| |
| // Remove the on-stack argument and return. |
| __ mov(cp, r0); |
| __ add(sp, sp, Operand(2 * kPointerSize)); |
| __ Ret(); |
| |
| // Need to collect. Call into runtime system. |
| __ bind(&gc); |
| __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); |
| } |
| |
| |
| // Takes a Smi and converts it to an IEEE 64 bit floating point value in two |
| // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and |
| // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a |
| // scratch register. Destroys the source register. No GC occurs during this |
| // stub so you don't have to set up the frame. |
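| // For example, the Smi 5 converts to 0x40140000 in the exponent word |
| // (sign 0, biased exponent 0x401 for 5.0 = 1.25 * 2^2, top fraction bits |
| // 0x40000) and 0x00000000 in the mantissa word. |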
| class ConvertToDoubleStub : public PlatformCodeStub { |
| public: |
| ConvertToDoubleStub(Register result_reg_1, |
| Register result_reg_2, |
| Register source_reg, |
| Register scratch_reg) |
| : result1_(result_reg_1), |
| result2_(result_reg_2), |
| source_(source_reg), |
| zeros_(scratch_reg) { } |
| |
| private: |
| Register result1_; |
| Register result2_; |
| Register source_; |
| Register zeros_; |
| |
| // Minor key encoding in 16 bits. |
| class ModeBits: public BitField<OverwriteMode, 0, 2> {}; |
| class OpBits: public BitField<Token::Value, 2, 14> {}; |
| |
| Major MajorKey() { return ConvertToDouble; } |
| int MinorKey() { |
| // Encode the parameters in a unique 16 bit value. |
| return result1_.code() + |
| (result2_.code() << 4) + |
| (source_.code() << 8) + |
| (zeros_.code() << 12); |
| } |
| |
| void Generate(MacroAssembler* masm); |
| }; |
| |
| |
| void ConvertToDoubleStub::Generate(MacroAssembler* masm) { |
| Register exponent = result1_; |
| Register mantissa = result2_; |
| |
| Label not_special; |
| __ SmiUntag(source_); |
| // Move sign bit from source to destination. This works because the sign bit |
| // in the exponent word of the double has the same position and polarity as |
| // the 2's complement sign bit in a Smi. |
| STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
| __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); |
| // Subtract from 0 if source was negative. |
| __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne); |
| |
| // We have -1, 0 or 1, which we treat specially. Register source_ contains |
| // absolute value: it is either equal to 1 (special case of -1 and 1), |
| // greater than 1 (not a special case) or less than 1 (special case of 0). |
| __ cmp(source_, Operand(1)); |
| __ b(gt, ¬_special); |
| |
| // For 1 or -1 we need to or in the 0 exponent (biased to 1023). |
| const uint32_t exponent_word_for_1 = |
| HeapNumber::kExponentBias << HeapNumber::kExponentShift; |
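| // (This is 0x3FF00000, the upper word of the double 1.0.) |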
| __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); |
| // 1, 0 and -1 all have 0 for the second word. |
| __ mov(mantissa, Operand::Zero()); |
| __ Ret(); |
| |
| __ bind(¬_special); |
| __ clz(zeros_, source_); |
| // Compute exponent and or it into the exponent register. |
| // We use mantissa as a scratch register here. Use a fudge factor to |
| // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts |
| // that fit in the ARM's constant field. |
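| // For example, source_ == 5 has 29 leading zeros, giving an exponent of |
| // 31 + 1023 - 29 == 1025 (0x401), the biased exponent of 5.0 == 1.25 * 2^2. |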
| int fudge = 0x400; |
| __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); |
| __ add(mantissa, mantissa, Operand(fudge)); |
| __ orr(exponent, |
| exponent, |
| Operand(mantissa, LSL, HeapNumber::kExponentShift)); |
| // Shift up the source chopping the top bit off. |
| __ add(zeros_, zeros_, Operand(1)); |
| // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. |
| __ mov(source_, Operand(source_, LSL, zeros_)); |
| // Compute lower part of fraction (last 12 bits). |
| __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); |
| // And the top (top 20 bits). |
| __ orr(exponent, |
| exponent, |
| Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); |
| __ Ret(); |
| } |
| |
| |
| bool WriteInt32ToHeapNumberStub::IsPregenerated() { |
| // These variants are compiled ahead of time. See next method. |
| if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { |
| return true; |
| } |
| if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { |
| return true; |
| } |
| // Other register combinations are generated as and when they are needed, |
| // so it is unsafe to call them from stubs (we can't generate a stub while |
| // we are generating a stub). |
| return false; |
| } |
| |
| |
| void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime( |
| Isolate* isolate) { |
| WriteInt32ToHeapNumberStub stub1(r1, r0, r2); |
| WriteInt32ToHeapNumberStub stub2(r2, r0, r3); |
| stub1.GetCode(isolate)->set_is_pregenerated(true); |
| stub2.GetCode(isolate)->set_is_pregenerated(true); |
| } |
| |
| |
| // See comment for class. |
| void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { |
| Label max_negative_int; |
| // the_int_ has the answer which is a signed int32 but not a Smi. |
| // We test for the special value that has a different exponent. This test |
| // has the neat side effect of setting the flags according to the sign. |
| STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
| __ cmp(the_int_, Operand(0x80000000u)); |
| __ b(eq, &max_negative_int); |
| // Set up the correct exponent in scratch_. All non-Smi int32s have the same |
| // exponent: a non-Smi integer is 1.xxx * 2^30, so the exponent is 30 (biased). |
| uint32_t non_smi_exponent = |
| (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
| __ mov(scratch_, Operand(non_smi_exponent)); |
| // Set the sign bit in scratch_ if the value was negative. |
| __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); |
| // Subtract from 0 if the value was negative. |
| __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs); |
| // We should be masking the implicit first digit of the mantissa away here, |
| // but it just ends up combining harmlessly with the last digit of the |
| // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get |
| // the most significant 1 to hit the last bit of the 12 bit sign and exponent. |
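| // For example, the_int_ == 0x40000000 (2^30) yields an exponent word of |
| // 0x41D00000 and a mantissa word of 0x00000000, i.e. exactly 2^30. |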
| ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); |
| const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
| __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); |
| __ str(scratch_, FieldMemOperand(the_heap_number_, |
| HeapNumber::kExponentOffset)); |
| __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); |
| __ str(scratch_, FieldMemOperand(the_heap_number_, |
| HeapNumber::kMantissaOffset)); |
| __ Ret(); |
| |
| __ bind(&max_negative_int); |
| // The max negative int32 is stored as a positive number in the mantissa of |
| // a double because it uses a sign bit instead of using two's complement. |
| // The actual mantissa bits stored are all 0 because the implicit most |
| // significant 1 bit is not stored. |
| non_smi_exponent += 1 << HeapNumber::kExponentShift; |
| __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); |
| __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); |
| __ mov(ip, Operand::Zero()); |
| __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); |
| __ Ret(); |
| } |
| |
| |
| // Handle the case where the lhs and rhs are the same object. |
| // Equality is almost reflexive (everything but NaN), so this is a test |
| // for "identity and not NaN". |
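| // For example, if both registers hold the same heap number whose value is |
| // NaN, the answer must still be "not equal", so the NaN check below cannot |
| // be skipped. |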
| static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| Label* slow, |
| Condition cond) { |
| Label not_identical; |
| Label heap_number, return_equal; |
| __ cmp(r0, r1); |
| __ b(ne, ¬_identical); |
| |
| // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), |
| // so we do the second best thing - test it ourselves. |
| // They are both equal and they are not both Smis so both of them are not |
| // Smis. If it's not a heap number, then return equal. |
| if (cond == lt || cond == gt) { |
| __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); |
| __ b(ge, slow); |
| } else { |
| __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
| __ b(eq, &heap_number); |
| // Comparing JS objects with <=, >= is complicated. |
| if (cond != eq) { |
| __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| __ b(ge, slow); |
| // Normally here we fall through to return_equal, but undefined is |
| // special: (undefined == undefined) == true, but |
| // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
| if (cond == le || cond == ge) { |
| __ cmp(r4, Operand(ODDBALL_TYPE)); |
| __ b(ne, &return_equal); |
| __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
| __ cmp(r0, r2); |
| __ b(ne, &return_equal); |
| if (cond == le) { |
| // undefined <= undefined should fail. |
| __ mov(r0, Operand(GREATER)); |
| } else { |
| // undefined >= undefined should fail. |
| __ mov(r0, Operand(LESS)); |
| } |
| __ Ret(); |
| } |
| } |
| } |
| |
| __ bind(&return_equal); |
| if (cond == lt) { |
| __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. |
| } else if (cond == gt) { |
| __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. |
| } else { |
| __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. |
| } |
| __ Ret(); |
| |
| // For less and greater we don't have to check for NaN since the result of |
| // x < x is false regardless. For the others here is some code to check |
| // for NaN. |
| if (cond != lt && cond != gt) { |
| __ bind(&heap_number); |
| // It is a heap number, so return non-equal if it's NaN and equal if it's |
| // not NaN. |
| |
| // The representation of NaN values has all exponent bits (52..62) set, |
| // and not all mantissa bits (0..51) clear. |
| // Read top bits of double representation (second word of value). |
| __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
| // Test that exponent bits are all set. |
| __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
| // NaNs have all-one exponents so they sign extend to -1. |
| __ cmp(r3, Operand(-1)); |
| __ b(ne, &return_equal); |
| |
| // Shift out flag and all exponent bits, retaining only mantissa. |
| __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); |
| // Or with all low-bits of mantissa. |
| __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
| __ orr(r0, r3, Operand(r2), SetCC); |
| // For equal we already have the right value in r0: Return zero (equal) |
| // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
| // not (it's a NaN). For <= and >= we need to load r0 with the failing |
| // value if it's a NaN. |
| if (cond != eq) { |
| // All-zero means Infinity means equal. |
| __ Ret(eq); |
| if (cond == le) { |
| __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. |
| } else { |
| __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. |
| } |
| } |
| __ Ret(); |
| } |
| // No fall through here. |
| |
| __ bind(¬_identical); |
| } |
| |
| |
| // See comment at call site. |
| static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| Register lhs, |
| Register rhs, |
| Label* lhs_not_nan, |
| Label* slow, |
| bool strict) { |
| ASSERT((lhs.is(r0) && rhs.is(r1)) || |
| (lhs.is(r1) && rhs.is(r0))); |
| |
| Label rhs_is_smi; |
| __ JumpIfSmi(rhs, &rhs_is_smi); |
| |
| // Lhs is a Smi. Check whether the rhs is a heap number. |
| __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); |
| if (strict) { |
| // If rhs is not a number and lhs is a Smi then strict equality cannot |
| // succeed. Return non-equal. |
| // If rhs is r0 then there is already a non-zero value in it. |
| if (!rhs.is(r0)) { |
| __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
| } |
| __ Ret(ne); |
| } else { |
| // Smi compared non-strictly with a non-Smi non-heap-number. Call |
| // the runtime. |
| __ b(ne, slow); |
| } |
| |
| // Lhs is a smi, rhs is a number. |
| // Convert lhs to a double in d7. |
| __ SmiToDouble(d7, lhs); |
| // Load the double from rhs, tagged HeapNumber r0, to d6. |
| __ sub(r7, rhs, Operand(kHeapObjectTag)); |
| __ vldr(d6, r7, HeapNumber::kValueOffset); |
| |
| // We now have both loaded as doubles but we can skip the lhs nan check |
| // since it's a smi. |
| __ jmp(lhs_not_nan); |
| |
| __ bind(&rhs_is_smi); |
| // Rhs is a smi. Check whether the non-smi lhs is a heap number. |
| __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); |
| if (strict) { |
| // If lhs is not a number and rhs is a smi then strict equality cannot |
| // succeed. Return non-equal. |
| // If lhs is r0 then there is already a non-zero value in it. |
| if (!lhs.is(r0)) { |
| __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
| } |
| __ Ret(ne); |
| } else { |
| // Smi compared non-strictly with a non-smi non-heap-number. Call |
| // the runtime. |
| __ b(ne, slow); |
| } |
| |
| // Rhs is a smi, lhs is a heap number. |
| // Load the double from lhs, tagged HeapNumber r1, to d7. |
| __ sub(r7, lhs, Operand(kHeapObjectTag)); |
| __ vldr(d7, r7, HeapNumber::kValueOffset); |
| // Convert rhs to a double in d6. |
| __ SmiToDouble(d6, rhs); |
| // Fall through to both_loaded_as_doubles. |
| } |
| |
| |
| // See comment at call site. |
| static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| Register lhs, |
| Register rhs) { |
| ASSERT((lhs.is(r0) && rhs.is(r1)) || |
| (lhs.is(r1) && rhs.is(r0))); |
| |
| // If either operand is a JS object or an oddball value, then they are |
| // not equal since their pointers are different. |
| // There is no test for undetectability in strict equality. |
| STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); |
| Label first_non_object; |
| // Get the type of the first operand into r2 and compare it with |
| // FIRST_SPEC_OBJECT_TYPE. |
| __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE); |
| __ b(lt, &first_non_object); |
| |
| // Return non-zero (r0 is not zero) |
| Label return_not_equal; |
| __ bind(&return_not_equal); |
| __ Ret(); |
| |
| __ bind(&first_non_object); |
| // Check for oddballs: true, false, null, undefined. |
| __ cmp(r2, Operand(ODDBALL_TYPE)); |
| __ b(eq, &return_not_equal); |
| |
| __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE); |
| __ b(ge, &return_not_equal); |
| |
| // Check for oddballs: true, false, null, undefined. |
| __ cmp(r3, Operand(ODDBALL_TYPE)); |
| __ b(eq, &return_not_equal); |
| |
| // Now that we have the types we might as well check for |
| // internalized-internalized. |
| STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
| __ orr(r2, r2, Operand(r3)); |
| __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
| __ b(eq, &return_not_equal); |
| } |
| |
| |
| // See comment at call site. |
| static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, |
| Register lhs, |
| Register rhs, |
| Label* both_loaded_as_doubles, |
| Label* not_heap_numbers, |
| Label* slow) { |
| ASSERT((lhs.is(r0) && rhs.is(r1)) || |
| (lhs.is(r1) && rhs.is(r0))); |
| |
| __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); |
| __ b(ne, not_heap_numbers); |
| __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| __ cmp(r2, r3); |
| __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. |
| |
| // Both are heap numbers. Load them up then jump to the code we have |
| // for that. |
| __ sub(r7, rhs, Operand(kHeapObjectTag)); |
| __ vldr(d6, r7, HeapNumber::kValueOffset); |
| __ sub(r7, lhs, Operand(kHeapObjectTag)); |
| __ vldr(d7, r7, HeapNumber::kValueOffset); |
| __ jmp(both_loaded_as_doubles); |
| } |
| |
| |
| // Fast negative check for internalized-to-internalized equality. |
| static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
| Register lhs, |
| Register rhs, |
| Label* possible_strings, |
| Label* not_both_strings) { |
| ASSERT((lhs.is(r0) && rhs.is(r1)) || |
| (lhs.is(r1) && rhs.is(r0))); |
| |
| // r2 is object type of rhs. |
| Label object_test; |
| STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
| __ tst(r2, Operand(kIsNotStringMask)); |
| __ b(ne, &object_test); |
| __ tst(r2, Operand(kIsNotInternalizedMask)); |
| __ b(ne, possible_strings); |
| __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); |
| __ b(ge, not_both_strings); |
| __ tst(r3, Operand(kIsNotInternalizedMask)); |
| __ b(ne, possible_strings); |
| |
| // Both are internalized. We already checked they weren't the same pointer |
| // so they are not equal. |
| __ mov(r0, Operand(NOT_EQUAL)); |
| __ Ret(); |
| |
| __ bind(&object_test); |
| __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| __ b(lt, not_both_strings); |
| __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE); |
| __ b(lt, not_both_strings); |
| // If both objects are undetectable, they are equal. Otherwise, they |
| // are not equal, since they are different objects and an object is not |
| // equal to undefined. |
| __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
| __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset)); |
| __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset)); |
| __ and_(r0, r2, Operand(r3)); |
| __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); |
| __ eor(r0, r0, Operand(1 << Map::kIsUndetectable)); |
| __ Ret(); |
| } |
| |
| |
| void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, |
| Register object, |
| Register result, |
| Register scratch1, |
| Register scratch2, |
| Register scratch3, |
| Label* not_found) { |
| // Use of registers. Register result is used as a temporary. |
| Register number_string_cache = result; |
| Register mask = scratch3; |
| |
| // Load the number string cache. |
| __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); |
| |
| // Make the hash mask from the length of the number string cache. It |
| // contains two elements (number and string) for each cache entry. |
| __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); |
| // Divide length by two (length is a smi). |
| __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); |
| __ sub(mask, mask, Operand(1)); // Make mask. |
| |
| // Calculate the entry in the number string cache. The hash value in the |
| // number string cache for smis is just the smi value, and the hash for |
| // doubles is the xor of the upper and lower words. See |
| // Heap::GetNumberStringCache. |
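| // For example, with 64 cache entries (mask == 63) the smi 71 probes slot |
| // 71 & 63 == 7, while a heap number probes the slot given by the xor of its |
| // two 32-bit halves, masked the same way. |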
| Isolate* isolate = masm->isolate(); |
| Label is_smi; |
| Label load_result_from_cache; |
| __ JumpIfSmi(object, &is_smi); |
| __ CheckMap(object, |
| scratch1, |
| Heap::kHeapNumberMapRootIndex, |
| not_found, |
| DONT_DO_SMI_CHECK); |
| |
| STATIC_ASSERT(8 == kDoubleSize); |
| __ add(scratch1, |
| object, |
| Operand(HeapNumber::kValueOffset - kHeapObjectTag)); |
| __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); |
| __ eor(scratch1, scratch1, Operand(scratch2)); |
| __ and_(scratch1, scratch1, Operand(mask)); |
| |
| // Calculate address of entry in string cache: each entry consists |
| // of two pointer sized fields. |
| __ add(scratch1, |
| number_string_cache, |
| Operand(scratch1, LSL, kPointerSizeLog2 + 1)); |
| |
| Register probe = mask; |
| __ ldr(probe, |
| FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
| __ JumpIfSmi(probe, not_found); |
| __ sub(scratch2, object, Operand(kHeapObjectTag)); |
| __ vldr(d0, scratch2, HeapNumber::kValueOffset); |
| __ sub(probe, probe, Operand(kHeapObjectTag)); |
| __ vldr(d1, probe, HeapNumber::kValueOffset); |
| __ VFPCompareAndSetFlags(d0, d1); |
| __ b(ne, not_found); // The cache did not contain this value. |
| __ b(&load_result_from_cache); |
| |
| __ bind(&is_smi); |
| Register scratch = scratch1; |
| __ and_(scratch, mask, Operand(object, ASR, 1)); |
| // Calculate address of entry in string cache: each entry consists |
| // of two pointer sized fields. |
| __ add(scratch, |
| number_string_cache, |
| Operand(scratch, LSL, kPointerSizeLog2 + 1)); |
| |
| // Check if the entry is the smi we are looking for. |
| __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); |
| __ cmp(object, probe); |
| __ b(ne, not_found); |
| |
| // Get the result from the cache. |
| __ bind(&load_result_from_cache); |
| __ ldr(result, |
| FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); |
| __ IncrementCounter(isolate->counters()->number_to_string_native(), |
| 1, |
| scratch1, |
| scratch2); |
| } |
| |
| |
| void NumberToStringStub::Generate(MacroAssembler* masm) { |
| Label runtime; |
| |
| __ ldr(r1, MemOperand(sp, 0)); |
| |
| // Generate code to lookup number in the number string cache. |
| GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, &runtime); |
| __ add(sp, sp, Operand(1 * kPointerSize)); |
| __ Ret(); |
| |
| __ bind(&runtime); |
| // Handle number to string in the runtime system if not found in the cache. |
| __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); |
| } |
| |
| |
| static void ICCompareStub_CheckInputType(MacroAssembler* masm, |
| Register input, |
| Register scratch, |
| CompareIC::State expected, |
| Label* fail) { |
| Label ok; |
| if (expected == CompareIC::SMI) { |
| __ JumpIfNotSmi(input, fail); |
| } else if (expected == CompareIC::NUMBER) { |
| __ JumpIfSmi(input, &ok); |
| __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, |
| DONT_DO_SMI_CHECK); |
| } |
| // We could be strict about internalized/non-internalized here, but as long as |
| // hydrogen doesn't care, the stub doesn't have to care either. |
| __ bind(&ok); |
| } |
| |
| |
| // On entry r1 and r2 are the values to be compared. |
| // On exit r0 is 0, positive or negative to indicate the result of |
| // the comparison. |
| void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { |
| Register lhs = r1; |
| Register rhs = r0; |
| Condition cc = GetCondition(); |
| |
| Label miss; |
| ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss); |
| ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss); |
| |
| Label slow; // Call builtin. |
| Label not_smis, both_loaded_as_doubles, lhs_not_nan; |
| |
| Label not_two_smis, smi_done; |
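| // Fast path: if both operands are smis, untag them and return their |
| // difference in r0; only its sign matters to the caller (for example, |
| // comparing the smis 3 and 5 leaves -2 in r0, meaning "less than"). |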
| __ orr(r2, r1, r0); |
| __ JumpIfNotSmi(r2, ¬_two_smis); |
| __ mov(r1, Operand(r1, ASR, 1)); |
| __ sub(r0, r1, Operand(r0, ASR, 1)); |
| __ Ret(); |
| __ bind(¬_two_smis); |
| |
| // NOTICE! This code is only reached after a smi-fast-case check, so |
| // it is certain that at least one operand isn't a smi. |
| |
| // Handle the case where the objects are identical. Either returns the answer |
| // or goes to slow. Only falls through if the objects were not identical. |
| EmitIdenticalObjectComparison(masm, &slow, cc); |
| |
| // If either is a Smi (we know that not both are), then they can only |
| // be strictly equal if the other is a HeapNumber. |
| STATIC_ASSERT(kSmiTag == 0); |
| ASSERT_EQ(0, Smi::FromInt(0)); |
| __ and_(r2, lhs, Operand(rhs)); |
| __ JumpIfNotSmi(r2, ¬_smis); |
| // One operand is a smi. EmitSmiNonsmiComparison generates code that can: |
| // 1) Return the answer. |
| // 2) Go to slow. |
| // 3) Fall through to both_loaded_as_doubles. |
| // 4) Jump to lhs_not_nan. |
| // In cases 3 and 4 we have found out we were dealing with a number-number |
| // comparison. If VFP3 is supported the double values of the numbers have |
| // been loaded into d7 and d6. Otherwise, the double values have been loaded |
| // into r0, r1, r2, and r3. |
| EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); |
| |
| __ bind(&both_loaded_as_doubles); |
| // The arguments have been converted to doubles and stored in d6 and d7, if |
| // VFP3 is supported, or in r0, r1, r2, and r3. |
| Isolate* isolate = masm->isolate(); |
| __ bind(&lhs_not_nan); |
| Label no_nan; |
| // ARMv7 VFP3 instructions to implement double precision comparison. |
| __ VFPCompareAndSetFlags(d7, d6); |
| Label nan; |
| __ b(vs, &nan); |
| __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
| __ mov(r0, Operand(LESS), LeaveCC, lt); |
| __ mov(r0, Operand(GREATER), LeaveCC, gt); |
| __ Ret(); |
| |
| __ bind(&nan); |
| // If one of the sides was a NaN then the v flag is set. Load r0 with |
| // whatever it takes to make the comparison fail, since comparisons with NaN |
| // always fail. |
| if (cc == lt || cc == le) { |
| __ mov(r0, Operand(GREATER)); |
| } else { |
| __ mov(r0, Operand(LESS)); |
| } |
| __ Ret(); |
| |
| __ bind(¬_smis); |
| // At this point we know we are dealing with two different objects, |
| // and neither of them is a Smi. The objects are in rhs_ and lhs_. |
| if (strict()) { |
| // This returns non-equal for some object types, or falls through if it |
| // was not lucky. |
| EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); |
| } |
| |
| Label check_for_internalized_strings; |
| Label flat_string_check; |
| // Check for heap-number-heap-number comparison. Can jump to slow case, |
| // or load both doubles into r0, r1, r2, r3 and jump to the code that handles |
| // that case. If the inputs are not doubles then jumps to |
| // check_for_internalized_strings. |
| // In this case r2 will contain the type of rhs_. Never falls through. |
| EmitCheckForTwoHeapNumbers(masm, |
| lhs, |
| rhs, |
| &both_loaded_as_doubles, |
| &check_for_internalized_strings, |
| &flat_string_check); |
| |
| __ bind(&check_for_internalized_strings); |
| // In the strict case the EmitStrictTwoHeapObjectCompare already took care of |
| // internalized strings. |
| if (cc == eq && !strict()) { |
| // Returns an answer for two internalized strings or two detectable objects. |
| // Otherwise jumps to string case or not both strings case. |
| // Assumes that r2 is the type of rhs_ on entry. |
| EmitCheckForInternalizedStringsOrObjects( |
| masm, lhs, rhs, &flat_string_check, &slow); |
| } |
| |
| // Check for both being sequential ASCII strings, and inline if that is the |
| // case. |
| __ bind(&flat_string_check); |
| |
| __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow); |
| |
| __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3); |
| if (cc == eq) { |
| StringCompareStub::GenerateFlatAsciiStringEquals(masm, |
| lhs, |
| rhs, |
| r2, |
| r3, |
| r4); |
| } else { |
| StringCompareStub::GenerateCompareFlatAsciiStrings(masm, |
| lhs, |
| rhs, |
| r2, |
| r3, |
| r4, |
| r5); |
| } |
| // Never falls through to here. |
| |
| __ bind(&slow); |
| |
| __ Push(lhs, rhs); |
| // Figure out which native to call and setup the arguments. |
| Builtins::JavaScript native; |
| if (cc == eq) { |
| native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
| } else { |
| native = Builtins::COMPARE; |
| int ncr; // NaN compare result |
| if (cc == lt || cc == le) { |
| ncr = GREATER; |
| } else { |
| ASSERT(cc == gt || cc == ge); // remaining cases |
| ncr = LESS; |
| } |
| __ mov(r0, Operand(Smi::FromInt(ncr))); |
| __ push(r0); |
| } |
| |
| // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
| // tagged as a small integer. |
| __ InvokeBuiltin(native, JUMP_FUNCTION); |
| |
| __ bind(&miss); |
| GenerateMiss(masm); |
| } |
| |
| |
| void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
| // We don't allow a GC during a store buffer overflow so there is no need to |
| // store the registers in any particular way, but we do have to store and |
| // restore them. |
| __ stm(db_w, sp, kCallerSaved | lr.bit()); |
| |
| const Register scratch = r1; |
| |
| if (save_doubles_ == kSaveFPRegs) { |
| __ SaveFPRegs(sp, scratch); |
| } |
| const int argument_count = 1; |
| const int fp_argument_count = 0; |
| |
| AllowExternalCallThatCantCauseGC scope(masm); |
| __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); |
| __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate()))); |
| __ CallCFunction( |
| ExternalReference::store_buffer_overflow_function(masm->isolate()), |
| argument_count); |
| if (save_doubles_ == kSaveFPRegs) { |
| __ RestoreFPRegs(sp, scratch); |
| } |
| __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). |
| } |
| |
| |
| // Generates code to call a C function to do a double operation. |
| // This code never falls through, but returns with a heap number containing |
| // the result in r0. |
| // Register heap_number_result must be a heap number in which the |
| // result of the operation will be stored. |
| // Requires the following layout on entry: |
| // d0: Left value. |
| // d1: Right value. |
| // If soft float ABI, use also r0, r1, r2, r3. |
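| // With the hard-float EABI variant the operands stay in d0/d1 and the result |
| // comes back in d0; with the soft-float ABI the operands are also passed in |
| // r0-r3 and the result comes back in r0/r1, which is why both paths appear |
| // below. |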
| static void CallCCodeForDoubleOperation(MacroAssembler* masm, |
| Token::Value op, |
| Register heap_number_result, |
| Register scratch) { |
| // Assert that heap_number_result is callee-saved. |
| // We currently always use r5 to pass it. |
| ASSERT(heap_number_result.is(r5)); |
| |
| // Push the current return address before the C call. Return will be |
| // through pop(pc) below. |
| __ push(lr); |
| __ PrepareCallCFunction(0, 2, scratch); |
| if (!masm->use_eabi_hardfloat()) { |
| __ vmov(r0, r1, d0); |
| __ vmov(r2, r3, d1); |
| } |
| { |
| AllowExternalCallThatCantCauseGC scope(masm); |
| __ CallCFunction( |
| ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); |
| } |
| // Store answer in the overwritable heap number. Double returned in |
| // registers r0 and r1 or in d0. |
| if (masm->use_eabi_hardfloat()) { |
| __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
| } else { |
| __ Strd(r0, r1, |
| FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
| } |
| // Place heap_number_result in r0 and return to the pushed return address. |
| __ mov(r0, Operand(heap_number_result)); |
| __ pop(pc); |
| } |
| |
| |
| void BinaryOpStub::Initialize() { |
| platform_specific_bit_ = true; // VFP2 is a base requirement for V8 |
| } |
| |
| |
| void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| Label get_result; |
| |
| __ Push(r1, r0); |
| |
| __ mov(r2, Operand(Smi::FromInt(MinorKey()))); |
| __ push(r2); |
| |
| __ TailCallExternalReference( |
| ExternalReference(IC_Utility(IC::kBinaryOp_Patch), |
| masm->isolate()), |
| 3, |
| 1); |
| } |
| |
| |
| void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( |
| MacroAssembler* masm) { |
| UNIMPLEMENTED(); |
| } |
| |
| |
| void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, |
| Token::Value op) { |
| Register left = r1; |
| Register right = r0; |
| Register scratch1 = r7; |
| Register scratch2 = r9; |
| |
| ASSERT(right.is(r0)); |
| STATIC_ASSERT(kSmiTag == 0); |
| |
| Label not_smi_result; |
| switch (op) { |
| case Token::ADD: |
| __ add(right, left, Operand(right), SetCC); // Add optimistically. |
| __ Ret(vc); |
| __ sub(right, right, Operand(left)); // Revert optimistic add. |
| break; |
| case Token::SUB: |
| __ sub(right, left, Operand(right), SetCC); // Subtract optimistically. |
| __ Ret(vc); |
| __ sub(right, left, Operand(right)); // Revert optimistic subtract. |
| break; |
| case Token::MUL: |
| // Remove tag from one of the operands. This way the multiplication result |
| // will be a smi if it fits the smi range. |
| __ SmiUntag(ip, right); |
| // Do multiplication |
| // scratch1 = lower 32 bits of ip * left. |
| // scratch2 = higher 32 bits of ip * left. |
| __ smull(scratch1, scratch2, left, ip); |
| // Check for overflowing the smi range - no overflow if higher 33 bits of |
| // the result are identical. |
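| // (For example, for the smis 5 and 3 the product of the tagged left (10) |
| // and the untagged right (3) is 30, i.e. the smi 15; its high word is 0 and |
| // matches bit 31 of the low word, so the result stays in the smi range.) |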
| __ mov(ip, Operand(scratch1, ASR, 31)); |
| __ cmp(ip, Operand(scratch2)); |
| __ b(ne, ¬_smi_result); |
| // Go slow on zero result to handle -0. |
| __ cmp(scratch1, Operand::Zero()); |
| __ mov(right, Operand(scratch1), LeaveCC, ne); |
| __ Ret(ne); |
| // We need -0 if we were multiplying a negative number with 0 to get 0. |
| // We know one of them was zero. |
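| // (For example, -3 * 0 must produce -0, which is not representable as a |
| // smi, so that case continues to not_smi_result below.) |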
| __ add(scratch2, right, Operand(left), SetCC); |
| __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); |
| __ Ret(pl); // Return smi 0 if the non-zero one was positive. |
| // We fall through here if we multiplied a negative number with 0, because |
| // that would mean we should produce -0. |
| break; |
| case Token::DIV: { |
| Label div_with_sdiv; |
| |
| // Check for 0 divisor. |
| __ cmp(right, Operand::Zero()); |
| __ b(eq, ¬_smi_result); |
| |
| // Check for power of two on the right hand side. |
| __ sub(scratch1, right, Operand(1)); |
| __ tst(scratch1, right); |
| if (CpuFeatures::IsSupported(SUDIV)) { |
| __ b(ne, &div_with_sdiv); |
| // Check for no remainder. |
| __ tst(left, scratch1); |
| __ b(ne, ¬_smi_result); |
| // Check for positive left hand side. |
| __ cmp(left, Operand::Zero()); |
| __ b(mi, &div_with_sdiv); |
| } else { |
| __ b(ne, ¬_smi_result); |
| // Check for positive and no remainder. |
| __ orr(scratch2, scratch1, Operand(0x80000000u)); |
| __ tst(left, scratch2); |
| __ b(ne, ¬_smi_result); |
| } |
| |
| // Perform division by shifting. |
| __ clz(scratch1, scratch1); |
| __ rsb(scratch1, scratch1, Operand(31)); |
| __ mov(right, Operand(left, LSR, scratch1)); |
| __ Ret(); |
| |
| if (CpuFeatures::IsSupported(SUDIV)) { |
| CpuFeatureScope scope(masm, SUDIV); |
| Label result_not_zero; |
| |
| __ bind(&div_with_sdiv); |
| // Do division. |
| __ sdiv(scratch1, left, right); |
| // Check that the remainder is zero. |
| __ mls(scratch2, scratch1, right, left); |
| __ cmp(scratch2, Operand::Zero()); |
| __ b(ne, ¬_smi_result); |
| // Check for negative zero result. |
| __ cmp(scratch1, Operand::Zero()); |
| __ b(ne, &result_not_zero); |
| __ cmp(right, Operand::Zero()); |
| __ b(lt, ¬_smi_result); |
| __ bind(&result_not_zero); |
| // Check for the corner case of dividing the most negative smi by -1. |
| __ cmp(scratch1, Operand(0x40000000)); |
| __ b(eq, ¬_smi_result); |
| // Tag and return the result. |
| __ SmiTag(right, scratch1); |
| __ Ret(); |
| } |
| break; |
| } |
| case Token::MOD: { |
| Label modulo_with_sdiv; |
| |
| if (CpuFeatures::IsSupported(SUDIV)) { |
| // Check for x % 0. |
| __ cmp(right, Operand::Zero()); |
| __ b(eq, ¬_smi_result); |
| |
| // Check for two positive smis. |
| __ orr(scratch1, left, Operand(right)); |
| __ tst(scratch1, Operand(0x80000000u)); |
| __ b(ne, &modulo_with_sdiv); |
| |
| // Check for power of two on the right hand side. |
| __ sub(scratch1, right, Operand(1)); |
| __ tst(scratch1, right); |
| __ b(ne, &modulo_with_sdiv); |
| } else { |
| // Check for two positive smis. |
| __ orr(scratch1, left, Operand(right)); |
| __ tst(scratch1, Operand(0x80000000u)); |
| __ b(ne, ¬_smi_result); |
| |
| // Check for power of two on the right hand side. |
| __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); |
| } |
| |
| // Perform modulus by masking (scratch1 contains right - 1). |
| __ and_(right, left, Operand(scratch1)); |
| __ Ret(); |
| |
| if (CpuFeatures::IsSupported(SUDIV)) { |
| CpuFeatureScope scope(masm, SUDIV); |
| __ bind(&modulo_with_sdiv); |
| __ mov(scratch2, right); |
| // Perform modulus with sdiv and mls. |
| __ sdiv(scratch1, left, right); |
| __ mls(right, scratch1, right, left); |
| // Return if the result is not 0. |
| __ cmp(right, Operand::Zero()); |
| __ Ret(ne); |
| // The result is 0, check for -0 case. |
| __ cmp(left, Operand::Zero()); |
| __ Ret(pl); |
| // This is a -0 case, restore the value of right. |
| __ mov(right, scratch2); |
| // We fall through here to not_smi_result to produce -0. |
| } |
| break; |
| } |
| case Token::BIT_OR: |
| __ orr(right, left, Operand(right)); |
| __ Ret(); |
| break; |
| case Token::BIT_AND: |
| __ and_(right, left, Operand(right)); |
| __ Ret(); |
| break; |
| case Token::BIT_XOR: |
| __ eor(right, left, Operand(right)); |
| __ Ret(); |
| break; |
| case Token::SAR: |
| // Remove tags from right operand. |
| __ GetLeastBitsFromSmi(scratch1, right, 5); |
| __ mov(right, Operand(left, ASR, scratch1)); |
| // Smi tag result. |
| __ bic(right, right, Operand(kSmiTagMask)); |
| __ Ret(); |
| break; |
| case Token::SHR: |
| // Remove tags from operands. We can't do this on a 31 bit number |
| // because then the 0s get shifted into bit 30 instead of bit 31. |
| __ SmiUntag(scratch1, left); |
| __ GetLeastBitsFromSmi(scratch2, right, 5); |
| __ mov(scratch1, Operand(scratch1, LSR, scratch2)); |
| // Unsigned shift is not allowed to produce a negative number, so |
| // check the sign bit and the sign bit after Smi tagging. |
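| // (For example, -1 >>> 0 is 0xFFFFFFFF, which cannot be a smi, so that case |
| // bails out to not_smi_result.) |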
| __ tst(scratch1, Operand(0xc0000000)); |
| __ b(ne, ¬_smi_result); |
| // Smi tag result. |
| __ SmiTag(right, scratch1); |
| __ Ret(); |
| break; |
| case Token::SHL: |
| // Remove tags from operands. |
| __ SmiUntag(scratch1, left); |
| __ GetLeastBitsFromSmi(scratch2, right, 5); |
| __ mov(scratch1, Operand(scratch1, LSL, scratch2)); |
| // Check that the signed result fits in a Smi. |
| __ TrySmiTag(right, scratch1, &not_smi_result); |
| __ Ret(); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| __ bind(&not_smi_result); |
| } |
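| |
| // Hedged note on the smi encoding the fast paths above rely on: a smi is a |
| // 31-bit signed integer stored shifted left by one, with the (zero) tag in |
| // bit 0, so tagging is a doubling and only values that fit in 31 bits can be |
| // tagged. A minimal C sketch (try_smi_tag is a hypothetical name): |
| // |
| //   bool try_smi_tag(int32_t value, int32_t* tagged) { |
| //     if (value > 0x3FFFFFFF || value < -0x40000000) return false; |
| //     *tagged = value * 2;  // the tag bit (bit 0) ends up 0 |
| //     return true; |
| //   } |
| // |
| // This is also why the quotient 0x40000000 (the most negative smi divided by |
| // -1) is rejected above: it is the one quotient that cannot be smi-tagged. |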
| |
| |
| void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| Register result, |
| Register heap_number_map, |
| Register scratch1, |
| Register scratch2, |
| Label* gc_required, |
| OverwriteMode mode); |
| |
| |
| void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, |
| BinaryOpIC::TypeInfo left_type, |
| BinaryOpIC::TypeInfo right_type, |
| bool smi_operands, |
| Label* not_numbers, |
| Label* gc_required, |
| Label* miss, |
| Token::Value op, |
| OverwriteMode mode) { |
| Register left = r1; |
| Register right = r0; |
| Register scratch1 = r6; |
| Register scratch2 = r7; |
| Register scratch3 = r4; |
| |
| ASSERT(smi_operands || (not_numbers != NULL)); |
| if (smi_operands) { |
| __ AssertSmi(left); |
| __ AssertSmi(right); |
| } |
| if (left_type == BinaryOpIC::SMI) { |
| __ JumpIfNotSmi(left, miss); |
| } |
| if (right_type == BinaryOpIC::SMI) { |
| __ JumpIfNotSmi(right, miss); |
| } |
| |
| Register heap_number_map = r9; |
| __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| |
| switch (op) { |
| case Token::ADD: |
| case Token::SUB: |
| case Token::MUL: |
| case Token::DIV: |
| case Token::MOD: { |
| // Allocate new heap number for result. |
| Register result = r5; |
| BinaryOpStub_GenerateHeapResultAllocation( |
| masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
| |
| // Load left and right operands into d0 and d1. |
| if (smi_operands) { |
| __ SmiToDouble(d1, right); |
| __ SmiToDouble(d0, left); |
| } else { |
| // Load right operand into d1. |
| if (right_type == BinaryOpIC::INT32) { |
| __ LoadNumberAsInt32Double( |
| right, d1, heap_number_map, scratch1, d8, miss); |
| } else { |
| Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; |
| __ LoadNumber(right, d1, heap_number_map, scratch1, fail); |
| } |
| // Load left operand into d0. |
| if (left_type == BinaryOpIC::INT32) { |
| __ LoadNumberAsInt32Double( |
| left, d0, heap_number_map, scratch1, d8, miss); |
| } else { |
| Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; |
| __ LoadNumber( |
| left, d0, heap_number_map, scratch1, fail); |
| } |
| } |
| |
| // Calculate the result. |
| if (op != Token::MOD) { |
| // Using VFP registers: |
| // d0: Left value |
| // d1: Right value |
| switch (op) { |
| case Token::ADD: |
| __ vadd(d5, d0, d1); |
| break; |
| case Token::SUB: |
| __ vsub(d5, d0, d1); |
| break; |
| case Token::MUL: |
| __ vmul(d5, d0, d1); |
| break; |
| case Token::DIV: |
| __ vdiv(d5, d0, d1); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| |
| __ sub(r0, result, Operand(kHeapObjectTag)); |
| __ vstr(d5, r0, HeapNumber::kValueOffset); |
| __ add(r0, r0, Operand(kHeapObjectTag)); |
| __ Ret(); |
| } else { |
| // Call the C function to handle the double operation. |
| CallCCodeForDoubleOperation(masm, op, result, scratch1); |
| if (FLAG_debug_code) { |
| __ stop("Unreachable code."); |
| } |
| } |
| break; |
| } |
| case Token::BIT_OR: |
| case Token::BIT_XOR: |
| case Token::BIT_AND: |
| case Token::SAR: |
| case Token::SHR: |
| case Token::SHL: { |
| if (smi_operands) { |
| __ SmiUntag(r3, left); |
| __ SmiUntag(r2, right); |
| } else { |
| // Convert operands to 32-bit integers. Right in r2 and left in r3. |
| __ ConvertNumberToInt32( |
| left, r3, heap_number_map, |
| scratch1, scratch2, scratch3, d0, d1, not_numbers); |
| __ ConvertNumberToInt32( |
| right, r2, heap_number_map, |
| scratch1, scratch2, scratch3, d0, d1, not_numbers); |
| } |
| |
| Label result_not_a_smi; |
| switch (op) { |
| case Token::BIT_OR: |
| __ orr(r2, r3, Operand(r2)); |
| break; |
| case Token::BIT_XOR: |
| __ eor(r2, r3, Operand(r2)); |
| break; |
| case Token::BIT_AND: |
| __ and_(r2, r3, Operand(r2)); |
| break; |
| case Token::SAR: |
| // Use only the 5 least significant bits of the shift count. |
| __ GetLeastBitsFromInt32(r2, r2, 5); |
| __ mov(r2, Operand(r3, ASR, r2)); |
| break; |
| case Token::SHR: |
| // Use only the 5 least significant bits of the shift count. |
| __ GetLeastBitsFromInt32(r2, r2, 5); |
| __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| // SHR is special because it is required to produce a positive answer. |
| // The code below for writing into heap numbers isn't capable of |
| // writing the register as an unsigned int so we go to slow case if we |
| // hit this case. |
| __ b(mi, &result_not_a_smi); |
| break; |
| case Token::SHL: |
| // Use only the 5 least significant bits of the shift count. |
| __ GetLeastBitsFromInt32(r2, r2, 5); |
| __ mov(r2, Operand(r3, LSL, r2)); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| |
| // Check that the *signed* result fits in a smi. |
| __ TrySmiTag(r0, r2, &result_not_a_smi); |
| __ Ret(); |
| |
| // Allocate new heap number for result. |
| __ bind(&result_not_a_smi); |
| Register result = r5; |
| if (smi_operands) { |
| __ AllocateHeapNumber( |
| result, scratch1, scratch2, heap_number_map, gc_required); |
| } else { |
| BinaryOpStub_GenerateHeapResultAllocation( |
| masm, result, heap_number_map, scratch1, scratch2, gc_required, |
| mode); |
| } |
| |
| // r2: Answer as signed int32. |
| // r5: Heap number to write answer into. |
| |
| // Nothing can go wrong now, so move the heap number to r0, which is the |
| // result. |
| __ mov(r0, Operand(r5)); |
| |
| // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
| // mentioned above SHR needs to always produce a positive result. |
| __ vmov(s0, r2); |
| if (op == Token::SHR) { |
| __ vcvt_f64_u32(d0, s0); |
| } else { |
| __ vcvt_f64_s32(d0, s0); |
| } |
| __ sub(r3, r0, Operand(kHeapObjectTag)); |
| __ vstr(d0, r3, HeapNumber::kValueOffset); |
| __ Ret(); |
| break; |
| } |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| |
| // Generate the smi code. If the operation on smis is successful, this return |
| // is generated. If the result is not a smi and heap number allocation is not |
| // requested, the code falls through. If number allocation is requested but a |
| // heap number cannot be allocated, the code jumps to the label gc_required. |
| void BinaryOpStub_GenerateSmiCode( |
| MacroAssembler* masm, |
| Label* use_runtime, |
| Label* gc_required, |
| Token::Value op, |
| BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
| OverwriteMode mode) { |
| Label not_smis; |
| |
| Register left = r1; |
| Register right = r0; |
| Register scratch1 = r7; |
| |
| // Perform combined smi check on both operands. |
| __ orr(scratch1, left, Operand(right)); |
| __ JumpIfNotSmi(scratch1, &not_smis); |
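| // (Both checks at once: the smi tag bit is 0, so the OR of the two operands |
| // has a clear tag bit only if both operands are smis.) |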
| |
| // If the smi-smi operation results in a smi, the return is generated. |
| BinaryOpStub_GenerateSmiSmiOperation(masm, op); |
| |
| // If heap number results are possible generate the result in an allocated |
| // heap number. |
| if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { |
| BinaryOpStub_GenerateFPOperation( |
| masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, |
| use_runtime, gc_required, &not_smis, op, mode); |
| } |
| __ bind(&not_smis); |
| } |
| |
| |
| void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| Label right_arg_changed, call_runtime; |
| |
| if (op_ == Token::MOD && encoded_right_arg_.has_value) { |
| // It is guaranteed that the value will fit into a Smi, because if it |
| // didn't, we wouldn't be here, see BinaryOp_Patch. |
| __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value()))); |
| __ b(ne, &right_arg_changed); |
| } |
| |
| if (result_type_ == BinaryOpIC::UNINITIALIZED || |
| result_type_ == BinaryOpIC::SMI) { |
| // Only allow smi results. |
| BinaryOpStub_GenerateSmiCode( |
| masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); |
| } else { |
| // Allow heap number result and don't make a transition if a heap number |
| // cannot be allocated. |
| BinaryOpStub_GenerateSmiCode( |
| masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, |
| mode_); |
| } |
| |
| // Code falls through if the result is not returned as either a smi or heap |
| // number. |
| __ bind(&right_arg_changed); |
| GenerateTypeTransition(masm); |
| |
| __ bind(&call_runtime); |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| GenerateRegisterArgsPush(masm); |
| GenerateCallRuntime(masm); |
| } |
| __ Ret(); |
| } |
| |
| |
| void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
| Label call_runtime; |
| ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); |
| ASSERT(op_ == Token::ADD); |
| // If both arguments are strings, call the string add stub. |
| // Otherwise, do a transition. |
| |
| // Registers containing left and right operands respectively. |
| Register left = r1; |
| Register right = r0; |
| |
| // Test if left operand is a string. |
| __ JumpIfSmi(left, &call_runtime); |
| __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); |
| __ b(ge, &call_runtime); |
| |
| // Test if right operand is a string. |
| __ JumpIfSmi(right, &call_runtime); |
| __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); |
| __ b(ge, &call_runtime); |
| |
| StringAddStub string_add_stub( |
| (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); |
| GenerateRegisterArgsPush(masm); |
| __ TailCallStub(&string_add_stub); |
| |
| __ bind(&call_runtime); |
| GenerateTypeTransition(masm); |
| } |
| |
| |
| void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
| |
| Register left = r1; |
| Register right = r0; |
| Register scratch1 = r7; |
| Register scratch2 = r9; |
| LowDwVfpRegister double_scratch = d0; |
| |
| Register heap_number_result = no_reg; |
| Register heap_number_map = r6; |
| __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| |
| Label call_runtime; |
| // Labels for type transition, used for wrong input or output types. |
| // Both labels are currently bound to the same position. We use two |
| // different labels to differentiate the causes leading to a type transition. |
| Label transition; |
| |
| // Smi-smi fast case. |
| Label skip; |
| __ orr(scratch1, left, right); |
| __ JumpIfNotSmi(scratch1, &skip); |
| BinaryOpStub_GenerateSmiSmiOperation(masm, op_); |
| // Fall through if the result is not a smi. |
| __ bind(&skip); |
| |
| switch (op_) { |
| case Token::ADD: |
| case Token::SUB: |
| case Token::MUL: |
| case Token::DIV: |
| case Token::MOD: { |
| // It could be that only SMIs have been seen at either the left |
| // or the right operand. For precise type feedback, patch the IC |
| // again if this changes. |
| if (left_type_ == BinaryOpIC::SMI) { |
| __ JumpIfNotSmi(left, &transition); |
| } |
| if (right_type_ == BinaryOpIC::SMI) { |
| __ JumpIfNotSmi(right, &transition); |
| } |
| // Load both operands and check that they are 32-bit integers. |
| // Jump to type transition if they are not. The registers r0 and r1 (right |
| // and left) are preserved for the runtime call. |
| __ LoadNumberAsInt32Double( |
| right, d1, heap_number_map, scratch1, d8, &transition); |
| __ LoadNumberAsInt32Double( |
| left, d0, heap_number_map, scratch1, d8, &transition); |
| |
| if (op_ != Token::MOD) { |
| Label return_heap_number; |
| switch (op_) { |
| case Token::ADD: |
| __ vadd(d5, d0, d1); |
| break; |
| case Token::SUB: |
| __ vsub(d5, d0, d1); |
| break; |
| case Token::MUL: |
| __ vmul(d5, d0, d1); |
| break; |
| case Token::DIV: |
| __ vdiv(d5, d0, d1); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| |
| if (result_type_ <= BinaryOpIC::INT32) { |
| __ TryDoubleToInt32Exact(scratch1, d5, d8); |
| // If the ne condition is set, result does |
| // not fit in a 32-bit integer. |
| __ b(ne, &transition); |
| // Try to tag the result as a Smi, return heap number on overflow. |
| __ SmiTag(scratch1, SetCC); |
| __ b(vs, &return_heap_number); |
| // Check for minus zero, transition in that case (because we need |
| // to return a heap number). |
| Label not_zero; |
| ASSERT(kSmiTag == 0); |
| __ b(ne, &not_zero); |
| __ VmovHigh(scratch2, d5); |
| __ tst(scratch2, Operand(HeapNumber::kSignMask)); |
| __ b(ne, &transition); |
| __ bind(&not_zero); |
| __ mov(r0, scratch1); |
| __ Ret(); |
| } |
| |
| __ bind(&return_heap_number); |
| // Return a heap number, or fall through to type transition or runtime |
| // call if we can't. |
| // We are using vfp registers so r5 is available. |
| heap_number_result = r5; |
| BinaryOpStub_GenerateHeapResultAllocation(masm, |
| heap_number_result, |
| heap_number_map, |
| scratch1, |
| scratch2, |
| &call_runtime, |
| mode_); |
| __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
| __ vstr(d5, r0, HeapNumber::kValueOffset); |
| __ mov(r0, heap_number_result); |
| __ Ret(); |
| |
| // A DIV operation expecting an integer result falls through |
| // to type transition. |
| |
| } else { |
| if (encoded_right_arg_.has_value) { |
| __ Vmov(d8, fixed_right_arg_value(), scratch1); |
| __ VFPCompareAndSetFlags(d1, d8); |
| __ b(ne, &transition); |
| } |
| |
| // We preserved r0 and r1 to be able to call runtime. |
| // Save the left value on the stack. |
| __ Push(r5, r4); |
| |
| Label pop_and_call_runtime; |
| |
| // Allocate a heap number to store the result. |
| heap_number_result = r5; |
| BinaryOpStub_GenerateHeapResultAllocation(masm, |
| heap_number_result, |
| heap_number_map, |
| scratch1, |
| scratch2, |
| &pop_and_call_runtime, |
| mode_); |
| |
| // Load the left value from the value saved on the stack. |
| __ Pop(r1, r0); |
| |
| // Call the C function to handle the double operation. |
| CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1); |
| if (FLAG_debug_code) { |
| __ stop("Unreachable code."); |
| } |
| |
| __ bind(&pop_and_call_runtime); |
| __ Drop(2); |
| __ b(&call_runtime); |
| } |
| |
| break; |
| } |
| |
| case Token::BIT_OR: |
| case Token::BIT_XOR: |
| case Token::BIT_AND: |
| case Token::SAR: |
| case Token::SHR: |
| case Token::SHL: { |
| Label return_heap_number; |
| // Convert operands to 32-bit integers. Right in r2 and left in r3. The |
| // registers r0 and r1 (right and left) are preserved for the runtime |
| // call. |
| __ LoadNumberAsInt32(left, r3, heap_number_map, |
| scratch1, d0, d1, &transition); |
| __ LoadNumberAsInt32(right, r2, heap_number_map, |
| scratch1, d0, d1, &transition); |
| |
| // The ECMA-262 standard specifies that, for shift operations, only the |
| // 5 least significant bits of the shift value should be used. |
| switch (op_) { |
| case Token::BIT_OR: |
| __ orr(r2, r3, Operand(r2)); |
| break; |
| case Token::BIT_XOR: |
| __ eor(r2, r3, Operand(r2)); |
| break; |
| case Token::BIT_AND: |
| __ and_(r2, r3, Operand(r2)); |
| break; |
| case Token::SAR: |
| __ and_(r2, r2, Operand(0x1f)); |
| __ mov(r2, Operand(r3, ASR, r2)); |
| break; |
| case Token::SHR: |
| __ and_(r2, r2, Operand(0x1f)); |
| __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| // SHR is special because it is required to produce a positive answer. |
| // We only get a negative result if the shift value (r2) is 0. |
| // This result cannot be represented as a signed 32-bit integer, so try |
| // to return a heap number if we can. |
| __ b(mi, (result_type_ <= BinaryOpIC::INT32) |
| ? &transition |
| : &return_heap_number); |
| break; |
| case Token::SHL: |
| __ and_(r2, r2, Operand(0x1f)); |
| __ mov(r2, Operand(r3, LSL, r2)); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| |
| // Check if the result fits in a smi. If not, try to return a heap number. |
| // (We know the result is an int32). |
| __ TrySmiTag(r0, r2, &return_heap_number); |
| __ Ret(); |
| |
| __ bind(&return_heap_number); |
| heap_number_result = r5; |
| BinaryOpStub_GenerateHeapResultAllocation(masm, |
| heap_number_result, |
| heap_number_map, |
| scratch1, |
| scratch2, |
| &call_runtime, |
| mode_); |
| |
| if (op_ != Token::SHR) { |
| // Convert the result to a floating point value. |
| __ vmov(double_scratch.low(), r2); |
| __ vcvt_f64_s32(double_scratch, double_scratch.low()); |
| } else { |
| // The result must be interpreted as an unsigned 32-bit integer. |
| __ vmov(double_scratch.low(), r2); |
| __ vcvt_f64_u32(double_scratch, double_scratch.low()); |
| } |
| |
| // Store the result. |
| __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
| __ vstr(double_scratch, r0, HeapNumber::kValueOffset); |
| __ mov(r0, heap_number_result); |
| __ Ret(); |
| |
| break; |
| } |
| |
| default: |
| UNREACHABLE(); |
| } |
| |
| // We never expect DIV to yield an integer result, so we always generate |
| // type transition code for DIV operations expecting an integer result: the |
| // code will fall through to this type transition. |
| if (transition.is_linked() || |
| ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { |
| __ bind(&transition); |
| GenerateTypeTransition(masm); |
| } |
| |
| __ bind(&call_runtime); |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| GenerateRegisterArgsPush(masm); |
| GenerateCallRuntime(masm); |
| } |
| __ Ret(); |
| } |
| |
| |
| void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { |
| Label call_runtime; |
| |
| if (op_ == Token::ADD) { |
| // Handle string addition here, because it is the only operation |
| // that does not do a ToNumber conversion on the operands. |
| GenerateAddStrings(masm); |
| } |
| |
| // Convert oddball arguments to numbers. |
| Label check, done; |
| __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); |
| __ b(ne, &check); |
| if (Token::IsBitOp(op_)) { |
| __ mov(r1, Operand(Smi::FromInt(0))); |
| } else { |
| __ LoadRoot(r1, Heap::kNanValueRootIndex); |
| } |
| __ jmp(&done); |
| __ bind(&check); |
| __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); |
| __ b(ne, &done); |
| if (Token::IsBitOp(op_)) { |
| __ mov(r0, Operand(Smi::FromInt(0))); |
| } else { |
| __ LoadRoot(r0, Heap::kNanValueRootIndex); |
| } |
| __ bind(&done); |
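| // (undefined converts to NaN under ToNumber, but the bitwise operators go |
| // through ToInt32, where NaN becomes 0; hence the Smi 0 shortcut above.) |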
| |
| GenerateNumberStub(masm); |
| } |
| |
| |
| void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
| Label call_runtime, transition; |
| BinaryOpStub_GenerateFPOperation( |
| masm, left_type_, right_type_, false, |
| &transition, &call_runtime, &transition, op_, mode_); |
| |
| __ bind(&transition); |
| GenerateTypeTransition(masm); |
| |
| __ bind(&call_runtime); |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| GenerateRegisterArgsPush(masm); |
| GenerateCallRuntime(masm); |
| } |
| __ Ret(); |
| } |
| |
| |
| void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| Label call_runtime, call_string_add_or_runtime, transition; |
| |
| BinaryOpStub_GenerateSmiCode( |
| masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); |
| |
| BinaryOpStub_GenerateFPOperation( |
| masm, left_type_, right_type_, false, |
| &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); |
| |
| __ bind(&transition); |
| GenerateTypeTransition(masm); |
| |
| __ bind(&call_string_add_or_runtime); |
| if (op_ == Token::ADD) { |
| GenerateAddStrings(masm); |
| } |
| |
| __ bind(&call_runtime); |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| GenerateRegisterArgsPush(masm); |
| GenerateCallRuntime(masm); |
| } |
| __ Ret(); |
| } |
| |
| |
| void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
| ASSERT(op_ == Token::ADD); |
| Label left_not_string, call_runtime; |
| |
| Register left = r1; |
| Register right = r0; |
| |
| // Check if left argument is a string. |
| __ JumpIfSmi(left, &left_not_string); |
| __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); |
| __ b(ge, &left_not_string); |
| |
| StringAddStub string_add_left_stub( |
| (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); |
| GenerateRegisterArgsPush(masm); |
| __ TailCallStub(&string_add_left_stub); |
| |
| // Left operand is not a string, test right. |
| __ bind(&left_not_string); |
| __ JumpIfSmi(right, &call_runtime); |
| __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); |
| __ b(ge, &call_runtime); |
| |
| StringAddStub string_add_right_stub( |
| (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); |
| GenerateRegisterArgsPush(masm); |
| __ TailCallStub(&string_add_right_stub); |
| |
| // At least one argument is not a string. |
| __ bind(&call_runtime); |
| } |
| |
| |
| void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| Register result, |
| Register heap_number_map, |
| Register scratch1, |
| Register scratch2, |
| Label* gc_required, |
| OverwriteMode mode) { |
| // The code below may clobber result if allocation fails. To keep both |
| // arguments intact for the runtime call, result cannot be one of them. |
| ASSERT(!result.is(r0) && !result.is(r1)); |
| |
| if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { |
| Label skip_allocation, allocated; |
| Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0; |
| // If the overwritable operand is already an object, we skip the |
| // allocation of a heap number. |
| __ JumpIfNotSmi(overwritable_operand, &skip_allocation); |
| // Allocate a heap number for the result. |
| __ AllocateHeapNumber( |
| result, scratch1, scratch2, heap_number_map, gc_required); |
| __ b(&allocated); |
| __ bind(&skip_allocation); |
| // Use object holding the overwritable operand for result. |
| __ mov(result, Operand(overwritable_operand)); |
| __ bind(&allocated); |
| } else { |
| ASSERT(mode == NO_OVERWRITE); |
| __ AllocateHeapNumber( |
| result, scratch1, scratch2, heap_number_map, gc_required); |
| } |
| } |
| |
| |
| void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| __ Push(r1, r0); |
| } |
| |
| |
| void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| // Untagged case: double input in d2, double result goes |
| // into d2. |
| // Tagged case: tagged input on top of stack and in r0, |
| // tagged result (heap number) goes into r0. |
| |
| Label input_not_smi; |
| Label loaded; |
| Label calculate; |
| Label invalid_cache; |
| const Register scratch0 = r9; |
| const Register scratch1 = r7; |
| const Register cache_entry = r0; |
| const bool tagged = (argument_type_ == TAGGED); |
| |
| if (tagged) { |
| // Argument is a number and is on stack and in r0. |
| // Load argument and check if it is a smi. |
| __ JumpIfNotSmi(r0, &input_not_smi); |
| |
| // Input is a smi. Convert to double and load the low and high words |
| // of the double into r2, r3. |
| __ SmiToDouble(d7, r0); |
| __ vmov(r2, r3, d7); |
| __ b(&loaded); |
| |
| __ bind(&input_not_smi); |
| // Check if input is a HeapNumber. |
| __ CheckMap(r0, |
| r1, |
| Heap::kHeapNumberMapRootIndex, |
| &calculate, |
| DONT_DO_SMI_CHECK); |
| // Input is a HeapNumber. Load it to a double register and store the |
| // low and high words into r2, r3. |
| __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| __ vmov(r2, r3, d0); |
| } else { |
| // Input is untagged double in d2. Output goes to d2. |
| __ vmov(r2, r3, d2); |
| } |
| __ bind(&loaded); |
| // r2 = low 32 bits of double value |
| // r3 = high 32 bits of double value |
| // Compute hash (the shifts are arithmetic): |
| // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
| __ eor(r1, r2, Operand(r3)); |
| __ eor(r1, r1, Operand(r1, ASR, 16)); |
| __ eor(r1, r1, Operand(r1, ASR, 8)); |
| ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); |
| __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); |
| |
| // r2 = low 32 bits of double value. |
| // r3 = high 32 bits of double value. |
| // r1 = TranscendentalCache::hash(double value). |
| Isolate* isolate = masm->isolate(); |
| ExternalReference cache_array = |
| ExternalReference::transcendental_cache_array_address(isolate); |
| __ mov(cache_entry, Operand(cache_array)); |
| // cache_entry points to cache array. |
| int cache_array_index |
| = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); |
| __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); |
| // r0 points to the cache for the type type_. |
| // If NULL, the cache hasn't been initialized yet, so go through runtime. |
| __ cmp(cache_entry, Operand::Zero()); |
| __ b(eq, &invalid_cache); |
| |
| #ifdef DEBUG |
| // Check that the layout of cache elements matches expectations. |
| { TranscendentalCache::SubCache::Element test_elem[2]; |
| char* elem_start = reinterpret_cast<char*>(&test_elem[0]); |
| char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); |
| char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); |
| char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); |
| char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); |
| CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer. |
| CHECK_EQ(0, elem_in0 - elem_start); |
| CHECK_EQ(kIntSize, elem_in1 - elem_start); |
| CHECK_EQ(2 * kIntSize, elem_out - elem_start); |
| } |
| #endif |
| |
| // Find the address of the cache entry with index r1, i.e., &r0[r1*12]. |
| __ add(r1, r1, Operand(r1, LSL, 1)); |
| __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); |
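| // (Address arithmetic sketch: each element is 12 bytes, two uint32_t inputs |
| // plus an output pointer as checked above, so &cache[r1] is cache + r1 * 12, |
| // computed here as r1 = r1 * 3 followed by adding r1 * 4 to the base.) |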
| // Check if cache matches: Double value is stored in uint32_t[2] array. |
| __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); |
| __ cmp(r2, r4); |
| __ cmp(r3, r5, eq); |
| __ b(ne, &calculate); |
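| // (The two cmp instructions above form a single 64-bit compare: the second |
| // cmp is conditional on eq, so the flags read "ne" if either word differs.) |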
| // Cache hit. Load result, cleanup and return. |
| Counters* counters = masm->isolate()->counters(); |
| __ IncrementCounter( |
| counters->transcendental_cache_hit(), 1, scratch0, scratch1); |
| if (tagged) { |
| // Pop input value from stack and load result into r0. |
| __ pop(); |
| __ mov(r0, Operand(r6)); |
| } else { |
| // Load result into d2. |
| __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); |
| } |
| __ Ret(); |
| |
| __ bind(&calculate); |
| __ IncrementCounter( |
| counters->transcendental_cache_miss(), 1, scratch0, scratch1); |
| if (tagged) { |
| __ bind(&invalid_cache); |
| ExternalReference runtime_function = |
| ExternalReference(RuntimeFunction(), masm->isolate()); |
| __ TailCallExternalReference(runtime_function, 1, 1); |
| } else { |
| Label no_update; |
| Label skip_cache; |
| |
| // Call C function to calculate the result and update the cache. |
| // r0: precalculated cache entry address. |
| // r2 and r3: parts of the double value. |
| // Store r0, r2 and r3 on stack for later before calling C function. |
| __ Push(r3, r2, cache_entry); |
| GenerateCallCFunction(masm, scratch0); |
| __ GetCFunctionDoubleResult(d2); |
| |
| // Try to update the cache. If we cannot allocate a |
| // heap number, we return the result without updating. |
| __ Pop(r3, r2, cache_entry); |
| __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); |
| __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update); |
| __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); |
| __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit()); |
| __ Ret(); |
| |
| __ bind(&invalid_cache); |
| // The cache is invalid. Call runtime which will recreate the |
| // cache. |
| __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); |
| __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); |
| __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| __ push(r0); |
| __ CallRuntime(RuntimeFunction(), 1); |
| } |
| __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| __ Ret(); |
| |
| __ bind(&skip_cache); |
| // Call the C function to calculate the result and return the answer |
| // directly without updating the cache. |
| GenerateCallCFunction(masm, scratch0); |
| __ GetCFunctionDoubleResult(d2); |
| __ bind(&no_update); |
| |
| // We return the value in d2 without adding it to the cache, but |
| // we cause a scavenging GC so that future allocations will succeed. |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| |
| // Allocate an aligned object larger than a HeapNumber. |
| ASSERT(4 * kPointerSize >= HeapNumber::kSize); |
| __ mov(scratch0, Operand(4 * kPointerSize)); |
| __ push(scratch0); |
| __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); |
| } |
| __ Ret(); |
| } |
| } |
| |
| |
| void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, |
| Register scratch) { |
| Isolate* isolate = masm->isolate(); |
| |
| __ push(lr); |
| __ PrepareCallCFunction(0, 1, scratch); |
| if (masm->use_eabi_hardfloat()) { |
| __ vmov(d0, d2); |
| } else { |
| __ vmov(r0, r1, d2); |
| } |
| AllowExternalCallThatCantCauseGC scope(masm); |
| switch (type_) { |
| case TranscendentalCache::SIN: |
| __ CallCFunction(ExternalReference::math_sin_double_function(isolate), |
| 0, 1); |
| break; |
| case TranscendentalCache::COS: |
| __ CallCFunction(ExternalReference::math_cos_double_function(isolate), |
| 0, 1); |
| break; |
| case TranscendentalCache::TAN: |
| __ CallCFunction(ExternalReference::math_tan_double_function(isolate), |
| 0, 1); |
| break; |
| case TranscendentalCache::LOG: |
| __ CallCFunction(ExternalReference::math_log_double_function(isolate), |
| 0, 1); |
| break; |
| default: |
| UNIMPLEMENTED(); |
| break; |
| } |
| __ pop(lr); |
| } |
| |
| |
| Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { |
| switch (type_) { |
| // Add more cases when necessary. |
| case TranscendentalCache::SIN: return Runtime::kMath_sin; |
| case TranscendentalCache::COS: return Runtime::kMath_cos; |
| case TranscendentalCache::TAN: return Runtime::kMath_tan; |
| case TranscendentalCache::LOG: return Runtime::kMath_log; |
| default: |
| UNIMPLEMENTED(); |
| return Runtime::kAbort; |
| } |
| } |
| |
| |
| void StackCheckStub::Generate(MacroAssembler* masm) { |
| __ TailCallRuntime(Runtime::kStackGuard, 0, 1); |
| } |
| |
| |
| void InterruptStub::Generate(MacroAssembler* masm) { |
| __ TailCallRuntime(Runtime::kInterrupt, 0, 1); |
| } |
| |
| |
| void MathPowStub::Generate(MacroAssembler* masm) { |
| const Register base = r1; |
| const Register exponent = r2; |
| const Register heapnumbermap = r5; |
| const Register heapnumber = r0; |
| const DwVfpRegister double_base = d1; |
| const DwVfpRegister double_exponent = d2; |
| const DwVfpRegister double_result = d3; |
| const DwVfpRegister double_scratch = d0; |
| const SwVfpRegister single_scratch = s0; |
| const Register scratch = r9; |
| const Register scratch2 = r7; |
| |
| Label call_runtime, done, int_exponent; |
| if (exponent_type_ == ON_STACK) { |
| Label base_is_smi, unpack_exponent; |
| // The exponent and base are supplied as arguments on the stack. |
| // This can only happen if the stub is called from non-optimized code. |
| // Load input parameters from stack to double registers. |
| __ ldr(base, MemOperand(sp, 1 * kPointerSize)); |
| __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); |
| |
| __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); |
| |
| __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); |
| __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset)); |
| __ cmp(scratch, heapnumbermap); |
| __ b(ne, &call_runtime); |
| |
| __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); |
| __ jmp(&unpack_exponent); |
| |
| __ bind(&base_is_smi); |
| __ vmov(single_scratch, scratch); |
| __ vcvt_f64_s32(double_base, single_scratch); |
| __ bind(&unpack_exponent); |
| |
| __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); |
| |
| __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); |
| __ cmp(scratch, heapnumbermap); |
| __ b(ne, &call_runtime); |
| __ vldr(double_exponent, |
| FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
| } else if (exponent_type_ == TAGGED) { |
| // Base is already in double_base. |
| __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); |
| |
| __ vldr(double_exponent, |
| FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
| } |
| |
| if (exponent_type_ != INTEGER) { |
| Label int_exponent_convert; |
| // Detect integer exponents stored as double. |
| __ vcvt_u32_f64(single_scratch, double_exponent); |
| // We do not check for NaN or Infinity here because comparing numbers on |
| // ARM correctly distinguishes NaNs. We end up calling the built-in. |
| __ vcvt_f64_u32(double_scratch, single_scratch); |
| __ VFPCompareAndSetFlags(double_scratch, double_exponent); |
| __ b(eq, &int_exponent_convert); |
| |
| if (exponent_type_ == ON_STACK) { |
| // Detect square root case. Crankshaft detects constant +/-0.5 at |
| // compile time and uses DoMathPowHalf instead. We then skip this check |
| // for non-constant cases of +/-0.5 as these hardly occur. |
| Label not_plus_half; |
| |
| // Test for 0.5. |
| __ vmov(double_scratch, 0.5, scratch); |
| __ VFPCompareAndSetFlags(double_exponent, double_scratch); |
| __ b(ne, &not_plus_half); |
| |
| // Calculate the square root of base. Check for the special case of |
| // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). |
| __ vmov(double_scratch, -V8_INFINITY, scratch); |
| __ VFPCompareAndSetFlags(double_base, double_scratch); |
| __ vneg(double_result, double_scratch, eq); |
| __ b(eq, &done); |
| |
| // Add +0 to convert -0 to +0. |
| __ vadd(double_scratch, double_base, kDoubleRegZero); |
| __ vsqrt(double_result, double_scratch); |
| __ jmp(&done); |
| |
| __ bind(&not_plus_half); |
| __ vmov(double_scratch, -0.5, scratch); |
| __ VFPCompareAndSetFlags(double_exponent, double_scratch); |
| __ b(ne, &call_runtime); |
| |
| // Calculate the reciprocal of the square root of base. Check for the |
| // special case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). |
| __ vmov(double_scratch, -V8_INFINITY, scratch); |
| __ VFPCompareAndSetFlags(double_base, double_scratch); |
| __ vmov(double_result, kDoubleRegZero, eq); |
| __ b(eq, &done); |
| |
| // Add +0 to convert -0 to +0. |
| __ vadd(double_scratch, double_base, kDoubleRegZero); |
| __ vmov(double_result, 1.0, scratch); |
| __ vsqrt(double_scratch, double_scratch); |
| __ vdiv(double_result, double_result, double_scratch); |
| __ jmp(&done); |
| } |
| |
| __ push(lr); |
| { |
| AllowExternalCallThatCantCauseGC scope(masm); |
| __ PrepareCallCFunction(0, 2, scratch); |
| __ SetCallCDoubleArguments(double_base, double_exponent); |
| __ CallCFunction( |
| ExternalReference::power_double_double_function(masm->isolate()), |
| 0, 2); |
| } |
| __ pop(lr); |
| __ GetCFunctionDoubleResult(double_result); |
| __ jmp(&done); |
| |
| __ bind(&int_exponent_convert); |
| __ vcvt_u32_f64(single_scratch, double_exponent); |
| __ vmov(scratch, single_scratch); |
| } |
| |
| // Calculate power with integer exponent. |
| __ bind(&int_exponent); |
| |
| // Get two copies of exponent in the registers scratch and exponent. |
| if (exponent_type_ == INTEGER) { |
| __ mov(scratch, exponent); |
| } else { |
| // Exponent has previously been stored into scratch as untagged integer. |
| __ mov(exponent, scratch); |
| } |
| __ vmov(double_scratch, double_base); // Back up base. |
| __ vmov(double_result, 1.0, scratch2); |
| |
| // Get absolute value of exponent. |
| __ cmp(scratch, Operand::Zero()); |
| __ mov(scratch2, Operand::Zero(), LeaveCC, mi); |
| __ sub(scratch, scratch2, scratch, LeaveCC, mi); |
| |
| Label while_true; |
| __ bind(&while_true); |
| __ mov(scratch, Operand(scratch, ASR, 1), SetCC); |
| __ vmul(double_result, double_result, double_scratch, cs); |
| __ vmul(double_scratch, double_scratch, double_scratch, ne); |
| __ b(ne, &while_true); |
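| |
| // A hedged C sketch of the square-and-multiply loop above (pow_abs_int is a |
| // hypothetical, illustrative name). The carry flag from the shift plays the |
| // role of the low exponent bit; negative exponents are handled just below by |
| // taking the reciprocal of the result: |
| // |
| //   double pow_abs_int(double base, uint32_t e) {  // e = |exponent| |
| //     double result = 1.0; |
| //     while (e != 0) { |
| //       if (e & 1) result *= base; |
| //       base *= base; |
| //       e >>= 1; |
| //     } |
| //     return result; |
| //   } |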
| |
| __ cmp(exponent, Operand::Zero()); |
| __ b(ge, &done); |
| __ vmov(double_scratch, 1.0, scratch); |
| __ vdiv(double_result, double_scratch, double_result); |
| // Test whether result is zero. Bail out to check for subnormal result. |
| // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. |
| __ VFPCompareAndSetFlags(double_result, 0.0); |
| __ b(ne, &done); |
| // double_exponent may not contain the exponent value if the input was a |
| // smi. We set it with the exponent value before bailing out. |
| __ vmov(single_scratch, exponent); |
| __ vcvt_f64_s32(double_exponent, single_scratch); |
| |
| // Returning or bailing out. |
| Counters* counters = masm->isolate()->counters(); |
| if (exponent_type_ == ON_STACK) { |
| // The arguments are still on the stack. |
| __ bind(&call_runtime); |
| __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); |
| |
| // The stub is called from non-optimized code, which expects the result |
| // as heap number in exponent. |
| __ bind(&done); |
| __ AllocateHeapNumber( |
| heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); |
| __ vstr(double_result, |
| FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
| ASSERT(heapnumber.is(r0)); |
| __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); |
| __ Ret(2); |
| } else { |
| __ push(lr); |
| { |
| AllowExternalCallThatCantCauseGC scope(masm); |
| __ PrepareCallCFunction(0, 2, scratch); |
| __ SetCallCDoubleArguments(double_base, double_exponent); |
| __ CallCFunction( |
| ExternalReference::power_double_double_function(masm->isolate()), |
| 0, 2); |
| } |
| __ pop(lr); |
| __ GetCFunctionDoubleResult(double_result); |
| |
| __ bind(&done); |
| __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); |
| __ Ret(); |
| } |
| } |
| |
| |
| bool CEntryStub::NeedsImmovableCode() { |
| return true; |
| } |
| |
| |
| bool CEntryStub::IsPregenerated() { |
| return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && |
| result_size_ == 1; |
| } |
| |
| |
| void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
| CEntryStub::GenerateAheadOfTime(isolate); |
| WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
| RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); |
| CreateAllocationSiteStub::GenerateAheadOfTime(isolate); |
| } |
| |
| |
| void CodeStub::GenerateFPStubs(Isolate* isolate) { |
| SaveFPRegsMode mode = kSaveFPRegs; |
| CEntryStub save_doubles(1, mode); |
| StoreBufferOverflowStub stub(mode); |
| // These stubs might already be in the snapshot; detect that and don't |
| // regenerate them, as doing so would corrupt the code stub initialization |
| // state. |
| Code* save_doubles_code; |
| if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { |
| save_doubles_code = *save_doubles.GetCode(isolate); |
| } |
| Code* store_buffer_overflow_code; |
| if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) { |
| store_buffer_overflow_code = *stub.GetCode(isolate); |
| } |
| save_doubles_code->set_is_pregenerated(true); |
| store_buffer_overflow_code->set_is_pregenerated(true); |
| isolate->set_fp_stubs_generated(true); |
| } |
| |
| |
| void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { |
| CEntryStub stub(1, kDontSaveFPRegs); |
| Handle<Code> code = stub.GetCode(isolate); |
| code->set_is_pregenerated(true); |
| } |
| |
| |
| static void JumpIfOOM(MacroAssembler* masm, |
| Register value, |
| Register scratch, |
| Label* oom_label) { |
| STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); |
| STATIC_ASSERT(kFailureTag == 3); |
| __ and_(scratch, value, Operand(0xf)); |
| __ cmp(scratch, Operand(0xf)); |
| __ b(eq, oom_label); |
| } |
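| |
| // (Encoding note: per the STATIC_ASSERTs above, a failure object carries the |
| // tag 3 in its low bits and OUT_OF_MEMORY_EXCEPTION is failure type 3, so |
| // with the customary two-bit tag and type fields an out-of-memory failure is |
| // exactly 0b1111 in the low nibble; hence (value & 0xf) == 0xf.) |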
| |
| |
| void CEntryStub::GenerateCore(MacroAssembler* masm, |
| Label* throw_normal_exception, |
| Label* throw_termination_exception, |
| Label* throw_out_of_memory_exception, |
| bool do_gc, |
| bool always_allocate) { |
| // r0: result parameter for PerformGC, if any |
| // r4: number of arguments including receiver (C callee-saved) |
| // r5: pointer to builtin function (C callee-saved) |
| // r6: pointer to the first argument (C callee-saved) |
| Isolate* isolate = masm->isolate(); |
| |
| if (do_gc) { |
| // Passing r0. |
| __ PrepareCallCFunction(1, 0, r1); |
| __ CallCFunction(ExternalReference::perform_gc_function(isolate), |
| 1, 0); |
| } |
| |
| ExternalReference scope_depth = |
| ExternalReference::heap_always_allocate_scope_depth(isolate); |
| if (always_allocate) { |
| __ mov(r0, Operand(scope_depth)); |
| __ ldr(r1, MemOperand(r0)); |
| __ add(r1, r1, Operand(1)); |
| __ str(r1, MemOperand(r0)); |
| } |
| |
| // Call C built-in. |
| // r0 = argc, r1 = argv |
| __ mov(r0, Operand(r4)); |
| __ mov(r1, Operand(r6)); |
| |
| #if V8_HOST_ARCH_ARM |
| int frame_alignment = MacroAssembler::ActivationFrameAlignment(); |
| int frame_alignment_mask = frame_alignment - 1; |
| if (FLAG_debug_code) { |
| if (frame_alignment > kPointerSize) { |
| Label alignment_as_expected; |
| ASSERT(IsPowerOf2(frame_alignment)); |
| __ tst(sp, Operand(frame_alignment_mask)); |
| __ b(eq, &alignment_as_expected); |
| // Don't use Check here, as it will call Runtime_Abort and re-enter here. |
| __ stop("Unexpected alignment"); |
| __ bind(&alignment_as_expected); |
| } |
| } |
| #endif |
| |
| __ mov(r2, Operand(ExternalReference::isolate_address(isolate))); |
| |
| // To let the GC traverse the return address of the exit frames, we need to |
| // know where the return address is. The CEntryStub is unmovable, so |
| // we can store the address on the stack to be able to find it again and |
| // we never have to restore it, because it will not change. |
| // Compute the return address in lr to return to after the jump below. Pc is |
| // already at '+ 8' from the current instruction but return is after three |
| // instructions so add another 4 to pc to get the return address. |
| { |
| // Prevent literal pool emission before return address. |
| Assembler::BlockConstPoolScope block_const_pool(masm); |
| masm->add(lr, pc, Operand(4)); |
| __ str(lr, MemOperand(sp, 0)); |
| masm->Jump(r5); |
| } |
| |
| __ VFPEnsureFPSCRState(r2); |
| |
| if (always_allocate) { |
| // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 |
| // though (they contain the result). |
| __ mov(r2, Operand(scope_depth)); |
| __ ldr(r3, MemOperand(r2)); |
| __ sub(r3, r3, Operand(1)); |
| __ str(r3, MemOperand(r2)); |
| } |
| |
| // Check for a failure result. |
| Label failure_returned; |
| STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); |
| // Lower 2 bits of r2 are 0 iff r0 has failure tag. |
| __ add(r2, r0, Operand(1)); |
| __ tst(r2, Operand(kFailureTagMask)); |
| __ b(eq, &failure_returned); |
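| // (The test above uses ((kFailureTag + 1) & kFailureTagMask) == 0, as |
| // asserted: a failure has both low bits set, so adding 1 clears them, while |
| // any other low-bit pattern leaves at least one bit set after the add.) |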
| |
| // Exit C frame and return. |
| // r0:r1: result |
| // sp: stack pointer |
| // fp: frame pointer |
| // Callee-saved register r4 still holds argc. |
| __ LeaveExitFrame(save_doubles_, r4); |
| __ mov(pc, lr); |
| |
| // Check whether we should retry or throw an exception. |
| Label retry; |
| __ bind(&failure_returned); |
| STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); |
| __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); |
| __ b(eq, &retry); |
| |
| // Special handling of out of memory exceptions. |
| JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception); |
| |
| // Retrieve the pending exception. |
| __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| isolate))); |
| __ ldr(r0, MemOperand(ip)); |
| |
| // See if we just retrieved an OOM exception. |
| JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception); |
| |
| // Clear the pending exception. |
| __ mov(r3, Operand(isolate->factory()->the_hole_value())); |
| __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| isolate))); |
| __ str(r3, MemOperand(ip)); |
| |
| // Special handling of termination exceptions, which are uncatchable |
| // by JavaScript code. |
| __ cmp(r0, Operand(isolate->factory()->termination_exception())); |
| __ b(eq, throw_termination_exception); |
| |
| // Handle normal exception. |
| __ jmp(throw_normal_exception); |
| |
| __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying |
| } |
| |
| |
| void CEntryStub::Generate(MacroAssembler* masm) { |
| // Called from JavaScript; parameters are on stack as if calling JS function |
| // r0: number of arguments including receiver |
| // r1: pointer to builtin function |
| // fp: frame pointer (restored after C call) |
| // sp: stack pointer (restored as callee's sp after C call) |
| // cp: current context (C callee-saved) |
| |
| ProfileEntryHookStub::MaybeCallEntryHook(masm); |
| |
| // Result returned in r0 or r0+r1 by default. |
| |
| // NOTE: Invocations of builtins may return failure objects |
| // instead of a proper result. The builtin entry handles |
| // this by performing a garbage collection and retrying the |
| // builtin once. |
| |
| // Compute the argv pointer in a callee-saved register. |
| __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); |
| __ sub(r6, r6, Operand(kPointerSize)); |
| |
| // Enter the exit frame that transitions from JavaScript to C++. |
| FrameScope scope(masm, StackFrame::MANUAL); |
| __ EnterExitFrame(save_doubles_); |
| |
| // Set up argc and the builtin function in callee-saved registers. |
| __ mov(r4, Operand(r0)); |
| __ mov(r5, Operand(r1)); |
| |
| // r4: number of arguments (C callee-saved) |
| // r5: pointer to builtin function (C callee-saved) |
| // r6: pointer to first argument (C callee-saved) |
| |
| Label throw_normal_exception; |
| Label throw_termination_exception; |
| Label throw_out_of_memory_exception; |
| |
| // Call into the runtime system. |
| GenerateCore(masm, |
| &throw_normal_exception, |
| &throw_termination_exception, |
| &throw_out_of_memory_exception, |
| false, |
| false); |
| |
| // Do space-specific GC and retry runtime call. |
| GenerateCore(masm, |
| &throw_normal_exception, |
| &throw_termination_exception, |
| &throw_out_of_memory_exception, |
| true, |
| false); |
| |
| // Do full GC and retry runtime call one final time. |
| Failure* failure = Failure::InternalError(); |
| __ mov(r0, Operand(reinterpret_cast<int32_t>(failure))); |
| GenerateCore(masm, |
| &throw_normal_exception, |
| &throw_termination_exception, |
| &throw_out_of_memory_exception, |
| true, |
| true); |
| |
| __ bind(&throw_out_of_memory_exception); |
| // Set external caught exception to false. |
| Isolate* isolate = masm->isolate(); |
| ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, |
| isolate); |
| __ mov(r0, Operand(false, RelocInfo::NONE32)); |
| __ mov(r2, Operand(external_caught)); |
| __ str(r0, MemOperand(r2)); |
| |
| // Set pending exception and r0 to out of memory exception. |
| Label already_have_failure; |
| JumpIfOOM(masm, r0, ip, &already_have_failure); |
| Failure* out_of_memory = Failure::OutOfMemoryException(0x1); |
| __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); |
| __ bind(&already_have_failure); |
| __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| isolate))); |
| __ str(r0, MemOperand(r2)); |
| // Fall through to the next label. |
| |
| __ bind(&throw_termination_exception); |
| __ ThrowUncatchable(r0); |
| |
| __ bind(&throw_normal_exception); |
| __ Throw(r0); |
| } |
| |
| |
| void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
| // r0: code entry |
| // r1: function |
| // r2: receiver |
| // r3: argc |
| // [sp+0]: argv |
| |
| Label invoke, handler_entry, exit; |
| |
| ProfileEntryHookStub::MaybeCallEntryHook(masm); |
| |
| // Called from C, so do not pop argc and args on exit (preserve sp) |
| // No need to save register-passed args |
| // Save callee-saved registers (incl. cp and fp), sp, and lr |
| __ stm(db_w, sp, kCalleeSaved | lr.bit()); |
| |
| // Save callee-saved vfp registers. |
| __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); |
| // Set up the reserved register for 0.0. |
| __ vmov(kDoubleRegZero, 0.0); |
| __ VFPEnsureFPSCRState(r4); |
| |
| // Get address of argv, see stm above. |
| // r0: code entry |
| // r1: function |
| // r2: receiver |
| // r3: argc |
| |
| // Set up argv in r4. |
| int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; |
| offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; |
| __ ldr(r4, MemOperand(sp, offset_to_argv)); |
| |
| // Push a frame with special values setup to mark it as an entry frame. |
| // r0: code entry |
| // r1: function |
| // r2: receiver |
| // r3: argc |
| // r4: argv |
| Isolate* isolate = masm->isolate(); |
| __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
| int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
| __ mov(r7, Operand(Smi::FromInt(marker))); |
| __ mov(r6, Operand(Smi::FromInt(marker))); |
| __ mov(r5, |
| Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); |
| __ ldr(r5, MemOperand(r5)); |
| __ Push(r8, r7, r6, r5); |
| |
| // Set up frame pointer for the frame to be pushed. |
| __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
| |
| // If this is the outermost JS call, set js_entry_sp value. |
| Label non_outermost_js; |
| ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); |
| __ mov(r5, Operand(ExternalReference(js_entry_sp))); |
| __ ldr(r6, MemOperand(r5)); |
| __ cmp(r6, Operand::Zero()); |
| __ b(ne, &non_outermost_js); |
| __ str(fp, MemOperand(r5)); |
| __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
| Label cont; |
| __ b(&cont); |
| __ bind(&non_outermost_js); |
| __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); |
| __ bind(&cont); |
| __ push(ip); |
| |
| // Jump to a faked try block that does the invoke, with a faked catch |
| // block that sets the pending exception. |
| __ jmp(&invoke); |
| |
| // Block literal pool emission whilst taking the position of the handler |
| // entry. This avoids making the assumption that literal pools are always |
| // emitted after an instruction is emitted, rather than before. |
| { |
| Assembler::BlockConstPoolScope block_const_pool(masm); |
| __ bind(&handler_entry); |
| handler_offset_ = handler_entry.pos(); |
| // Caught exception: Store result (exception) in the pending exception |
| // field in the JSEnv and return a failure sentinel. Coming in here the |
| // fp will be invalid because the PushTryHandler below sets it to 0 to |
| // signal the existence of the JSEntry frame. |
| __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| isolate))); |
| } |
| __ str(r0, MemOperand(ip)); |
| __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); |
| __ b(&exit); |
| |
| // Invoke: Link this frame into the handler chain. There's only one |
| // handler block in this code object, so its index is 0. |
| __ bind(&invoke); |
| // Must preserve r0-r4, r5-r7 are available. |
| __ PushTryHandler(StackHandler::JS_ENTRY, 0); |
| // If an exception not caught by another handler occurs, this handler |
| // returns control to the code after the bl(&invoke) above, which |
| // restores all kCalleeSaved registers (including cp and fp) to their |
| // saved values before returning a failure to C. |
| |
| // Clear any pending exceptions. |
| __ mov(r5, Operand(isolate->factory()->the_hole_value())); |
| __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| isolate))); |
| __ str(r5, MemOperand(ip)); |
| |
| // Invoke the function by calling through JS entry trampoline builtin. |
| // Notice that we cannot store a reference to the trampoline code directly in |
| // this stub, because runtime stubs are not traversed when doing GC. |
| |
| // Expected registers by Builtins::JSEntryTrampoline |
| // r0: code entry |
| // r1: function |
| // r2: receiver |
| // r3: argc |
| // r4: argv |
| if (is_construct) { |
| ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, |
| isolate); |
| __ mov(ip, Operand(construct_entry)); |
| } else { |
| ExternalReference entry(Builtins::kJSEntryTrampoline, isolate); |
| __ mov(ip, Operand(entry)); |
| } |
| __ ldr(ip, MemOperand(ip)); // deref address |
| |
| // Branch and link to JSEntryTrampoline. We don't use the double underscore |
| // macro for the add instruction because we don't want the coverage tool |
| // inserting instructions here after we read the pc. We block literal pool |
| // emission for the same reason. |
| { |
| Assembler::BlockConstPoolScope block_const_pool(masm); |
| __ mov(lr, Operand(pc)); |
| masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| } |
| |
| // Unlink this frame from the handler chain. |
| __ PopTryHandler(); |
| |
| __ bind(&exit); // r0 holds result |
| // Check if the current stack frame is marked as the outermost JS frame. |
| Label non_outermost_js_2; |
| __ pop(r5); |
| __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
| __ b(ne, &non_outermost_js_2); |
| __ mov(r6, Operand::Zero()); |
| __ mov(r5, Operand(ExternalReference(js_entry_sp))); |
| __ str(r6, MemOperand(r5)); |
| __ bind(&non_outermost_js_2); |
| |
| // Restore the top frame descriptors from the stack. |
| __ pop(r3); |
| __ mov(ip, |
| Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); |
| __ str(r3, MemOperand(ip)); |
| |
| // Reset the stack to the callee saved registers. |
| __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
| |
| // Restore callee-saved registers and return. |
| #ifdef DEBUG |
| if (FLAG_debug_code) { |
| __ mov(lr, Operand(pc)); |
| } |
| #endif |
| |
| // Restore callee-saved vfp registers. |
| __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); |
| |
| __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); |
| } |
| |
| |
| // Uses registers r0 to r4. |
| // Expected input (depending on whether args are in registers or on the stack): |
| // * object: r0 or at sp + 1 * kPointerSize. |
| // * function: r1 or at sp. |
| // |
| // An inlined call site may have been generated before calling this stub. |
| // In this case the offset to the inline site to patch is passed on the stack, |
| // in the safepoint slot for register r4. |
| // (See LCodeGen::DoInstanceOfKnownGlobal) |
| void InstanceofStub::Generate(MacroAssembler* masm) { |
| // Call site inlining and patching implies arguments in registers. |
| ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); |
| // ReturnTrueFalse is only implemented for inlined call sites. |
| ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); |
| |
| // Fixed register usage throughout the stub: |
| const Register object = r0; // Object (lhs). |
| Register map = r3; // Map of the object. |
| const Register function = r1; // Function (rhs). |
| const Register prototype = r4; // Prototype of the function. |
| const Register inline_site = r9; |
| const Register scratch = r2; |
| |
| const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize; |
| |
| Label slow, loop, is_instance, is_not_instance, not_js_object; |
| |
| if (!HasArgsInRegisters()) { |
| __ ldr(object, MemOperand(sp, 1 * kPointerSize)); |
| __ ldr(function, MemOperand(sp, 0)); |
| } |
| |
| // Check that the left-hand side is a JS object and load its map. |
| __ JumpIfSmi(object, &not_js_object); |
| __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); |
| |
| // If there is a call site cache don't look in the global cache, but do the |
| // real lookup and update the call site cache. |
| if (!HasCallSiteInlineCheck()) { |
| Label miss; |
| __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
| __ b(ne, &miss); |
| __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex); |
| __ b(ne, &miss); |
| __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); |
| __ Ret(HasArgsInRegisters() ? 0 : 2); |
| |
| __ bind(&miss); |
| } |
| |
| // Get the prototype of the function. |
| __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true); |
| |
| // Check that the function prototype is a JS object. |
| __ JumpIfSmi(prototype, &slow); |
| __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); |
| |
| // Update the global instanceof or call site inlined cache with the current |
| // map and function. The cached answer will be set when it is known below. |
| if (!HasCallSiteInlineCheck()) { |
| __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
| __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
| } else { |
| ASSERT(HasArgsInRegisters()); |
| // Patch the (relocated) inlined map check. |
| |
| // The offset was stored in r4 safepoint slot. |
| // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) |
| __ LoadFromSafepointRegisterSlot(scratch, r4); |
| __ sub(inline_site, lr, scratch); |
| // Get the map location in scratch and patch it. |
| __ GetRelocatedValueLocation(inline_site, scratch); |
| __ ldr(scratch, MemOperand(scratch)); |
| __ str(map, FieldMemOperand(scratch, Cell::kValueOffset)); |
| } |
| |
| // Register mapping: r3 is object map and r4 is function prototype. |
| // Get prototype of object into r2. |
| __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); |
| |
| // We don't need map any more. Use it as a scratch register. |
| Register scratch2 = map; |
| map = no_reg; |
| |
| // Loop through the prototype chain looking for the function prototype. |
| __ LoadRoot(scratch2, Heap::kNullValueRootIndex); |
| __ bind(&loop); |
| __ cmp(scratch, Operand(prototype)); |
| __ b(eq, &is_instance); |
| __ cmp(scratch, scratch2); |
| __ b(eq, &is_not_instance); |
| __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
| __ jmp(&loop); |
| |
| __ bind(&is_instance); |
| if (!HasCallSiteInlineCheck()) { |
| __ mov(r0, Operand(Smi::FromInt(0))); |
| __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); |
| } else { |
| // Patch the call site to return true. |
| __ LoadRoot(r0, Heap::kTrueValueRootIndex); |
| __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
| // Get the boolean result location in scratch and patch it. |
| __ GetRelocatedValueLocation(inline_site, scratch); |
| __ str(r0, MemOperand(scratch)); |
| |
| if (!ReturnTrueFalseObject()) { |
| __ mov(r0, Operand(Smi::FromInt(0))); |
| } |
| } |
| __ Ret(HasArgsInRegisters() ? 0 : 2); |
| |
| __ bind(&is_not_instance); |
| if (!HasCallSiteInlineCheck()) { |
| __ mov(r0, Operand(Smi::FromInt(1))); |
| __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); |
| } else { |
| // Patch the call site to return false. |
| __ LoadRoot(r0, Heap::kFalseValueRootIndex); |
| __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
| // Get the boolean result location in scratch and patch it. |
| __ GetRelocatedValueLocation(inline_site, scratch); |
| __ str(r0, MemOperand(scratch)); |
| |
| if (!ReturnTrueFalseObject()) { |
| __ mov(r0, Operand(Smi::FromInt(1))); |
| } |
| } |
| __ Ret(HasArgsInRegisters() ? 0 : 2); |
| |
| Label object_not_null, object_not_null_or_smi; |
| __ bind(¬_js_object); |
| // Before the null, smi and string value checks, check that the rhs is a |
| // function; for a non-function rhs an exception needs to be thrown. |
| __ JumpIfSmi(function, &slow); |
| __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE); |
| __ b(ne, &slow); |
| |
| // Null is not an instance of anything. |
| __ cmp(scratch, Operand(masm->isolate()->factory()->null_value())); |
| __ b(ne, &object_not_null); |
| __ mov(r0, Operand(Smi::FromInt(1))); |
| __ Ret(HasArgsInRegisters() ? 0 : 2); |
| |
| __ bind(&object_not_null); |
| // Smi values are not instances of anything. |
| __ JumpIfNotSmi(object, &object_not_null_or_smi); |
| __ mov(r0, Operand(Smi::FromInt(1))); |
| __ Ret(HasArgsInRegisters() ? 0 : 2); |
| |
| __ bind(&object_not_null_or_smi); |
| // String values are not instances of anything. |
| __ IsObjectJSStringType(object, scratch, &slow); |
| __ mov(r0, Operand(Smi::FromInt(1))); |
| __ Ret(HasArgsInRegisters() ? 0 : 2); |
| |
| // Slow-case. Tail call builtin. |
| __ bind(&slow); |
| if (!ReturnTrueFalseObject()) { |
| if (HasArgsInRegisters()) { |
| __ Push(r0, r1); |
| } |
| __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
| } else { |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| __ Push(r0, r1); |
| __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); |
| } |
| __ cmp(r0, Operand::Zero()); |
| __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); |
| __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); |
| __ Ret(HasArgsInRegisters() ? 0 : 2); |
| } |
| } |
| |
| |
| void FunctionPrototypeStub::Generate(MacroAssembler* masm) { |
| Label miss; |
| Register receiver; |
| if (kind() == Code::KEYED_LOAD_IC) { |
| // ----------- S t a t e ------------- |
| // -- lr : return address |
| // -- r0 : key |
| // -- r1 : receiver |
| // ----------------------------------- |
| __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string())); |
| __ b(ne, &miss); |
| receiver = r1; |
| } else { |
| ASSERT(kind() == Code::LOAD_IC); |
| // ----------- S t a t e ------------- |
| // -- r2 : name |
| // -- lr : return address |
| // -- r0 : receiver |
| // -- sp[0] : receiver |
| // ----------------------------------- |
| receiver = r0; |
| } |
| |
| StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss); |
| __ bind(&miss); |
| StubCompiler::TailCallBuiltin( |
| masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); |
| } |
| |
| |
| void StringLengthStub::Generate(MacroAssembler* masm) { |
| Label miss; |
| Register receiver; |
| if (kind() == Code::KEYED_LOAD_IC) { |
| // ----------- S t a t e ------------- |
| // -- lr : return address |
| // -- r0 : key |
| // -- r1 : receiver |
| // ----------------------------------- |
| __ cmp(r0, Operand(masm->isolate()->factory()->length_string())); |
| __ b(ne, &miss); |
| receiver = r1; |
| } else { |
| ASSERT(kind() == Code::LOAD_IC); |
| // ----------- S t a t e ------------- |
| // -- r2 : name |
| // -- lr : return address |
| // -- r0 : receiver |
| // -- sp[0] : receiver |
| // ----------------------------------- |
| receiver = r0; |
| } |
| |
| StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss, |
| support_wrapper_); |
| |
| __ bind(&miss); |
| StubCompiler::TailCallBuiltin( |
| masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); |
| } |
| |
| |
| void StoreArrayLengthStub::Generate(MacroAssembler* masm) { |
| // This accepts as a receiver anything JSArray::SetElementsLength accepts |
| // (currently anything except external arrays, i.e. anything with elements |
| // of FixedArray type). The value must be a number, but only smis are |
| // accepted since they are the most common case. |
| Label miss; |
| |
| Register receiver; |
| Register value; |
| if (kind() == Code::KEYED_STORE_IC) { |
| // ----------- S t a t e ------------- |
| // -- lr : return address |
| // -- r0 : value |
| // -- r1 : key |
| // -- r2 : receiver |
| // ----------------------------------- |
| __ cmp(r1, Operand(masm->isolate()->factory()->length_string())); |
| __ b(ne, &miss); |
| receiver = r2; |
| value = r0; |
| } else { |
| ASSERT(kind() == Code::STORE_IC); |
| // ----------- S t a t e ------------- |
| // -- lr : return address |
| // -- r0 : value |
| // -- r1 : receiver |
| // -- r2 : key |
| // ----------------------------------- |
| receiver = r1; |
| value = r0; |
| } |
| Register scratch = r3; |
| |
| // Check that the receiver isn't a smi. |
| __ JumpIfSmi(receiver, &miss); |
| |
| // Check that the object is a JS array. |
| __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); |
| __ b(ne, &miss); |
| |
| // Check that elements are FixedArray. |
| // We rely on StoreIC_ArrayLength below to deal with all types of |
| // fast elements (including COW). |
| __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); |
| __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE); |
| __ b(ne, &miss); |
| |
| // Check that the array has fast properties, otherwise the length |
| // property might have been redefined. |
| __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset)); |
| __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset)); |
| __ CompareRoot(scratch, Heap::kHashTableMapRootIndex); |
| __ b(eq, &miss); |
| |
| // Check that value is a smi. |
| __ JumpIfNotSmi(value, &miss); |
| |
| // Prepare tail call to StoreIC_ArrayLength. |
| __ Push(receiver, value); |
| |
| ExternalReference ref = |
| ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); |
| __ TailCallExternalReference(ref, 2, 1); |
| |
| __ bind(&miss); |
| |
| StubCompiler::TailCallBuiltin( |
| masm, BaseLoadStoreStubCompiler::MissBuiltin(kind())); |
| } |
| |
| |
| Register InstanceofStub::left() { return r0; } |
| |
| |
| Register InstanceofStub::right() { return r1; } |
| |
| |
| void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
| // The displacement is the offset of the last parameter (if any) |
| // relative to the frame pointer. |
| const int kDisplacement = |
| StandardFrameConstants::kCallerSPOffset - kPointerSize; |
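| // Both the key (r1) and the argument count (r0) are smis here, so the byte |
| // offset of the wanted argument from fp is (r0 - r1) scaled to a pointer |
| // offset (via PointerOffsetFromSmiKey) plus kDisplacement. |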
| |
| // Check that the key is a smi. |
| Label slow; |
| __ JumpIfNotSmi(r1, &slow); |
| |
| // Check if the calling frame is an arguments adaptor frame. |
| Label adaptor; |
| __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); |
| __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| __ b(eq, &adaptor); |
| |
| // Check index against formal parameters count limit passed in |
| // through register r0. Use unsigned comparison to get negative |
| // check for free. |
| __ cmp(r1, r0); |
| __ b(hs, &slow); |
| |
| // Read the argument from the stack and return it. |
| __ sub(r3, r0, r1); |
| __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3)); |
| __ ldr(r0, MemOperand(r3, kDisplacement)); |
| __ Jump(lr); |
| |
| // Arguments adaptor case: Check index against actual arguments |
| // limit found in the arguments adaptor frame. Use unsigned |
| // comparison to get negative check for free. |
| __ bind(&adaptor); |
| __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| __ cmp(r1, r0); |
| __ b(cs, &slow); |
| |
| // Read the argument from the adaptor frame and return it. |
| __ sub(r3, r0, r1); |
| __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3)); |
| __ ldr(r0, MemOperand(r3, kDisplacement)); |
| __ Jump(lr); |
| |
| // Slow-case: Handle non-smi or out-of-bounds access to arguments |
| // by calling the runtime system. |
| __ bind(&slow); |
| __ push(r1); |
| __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
| } |
| |
| |
| void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { |
| // sp[0] : number of parameters |
| // sp[4] : receiver displacement |
| // sp[8] : function |
| |
| // Check if the calling frame is an arguments adaptor frame. |
| Label runtime; |
| __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); |
| __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| __ b(ne, &runtime); |
| |
| // Patch the arguments.length and the parameters pointer in the current frame. |
| __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| __ str(r2, MemOperand(sp, 0 * kPointerSize)); |
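| // r2 holds the argument count as a smi (value << 1 on 32-bit), so one more |
| // LSL #1 below scales it by kPointerSize to get the byte size of the |
| // arguments area. |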
| __ add(r3, r3, Operand(r2, LSL, 1)); |
| __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); |
| __ str(r3, MemOperand(sp, 1 * kPointerSize)); |
| |
| __ bind(&runtime); |
| __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
| } |
| |
| |
| void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
| // Stack layout: |
| // sp[0] : number of parameters (tagged) |
| // sp[4] : address of receiver argument |
| // sp[8] : function |
| // Registers used over whole function: |
| // r6 : allocated object (tagged) |
| // r9 : mapped parameter count (tagged) |
| |
| __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); |
| // r1 = parameter count (tagged) |
| |
| // Check if the calling frame is an arguments adaptor frame. |
| Label runtime; |
| Label adaptor_frame, try_allocate; |
| __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); |
| __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| __ b(eq, &adaptor_frame); |
| |
| // No adaptor, parameter count = argument count. |
| __ mov(r2, r1); |
| __ b(&try_allocate); |
| |
| // We have an adaptor frame. Patch the parameters pointer. |
| __ bind(&adaptor_frame); |
| __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| __ add(r3, r3, Operand(r2, LSL, 1)); |
| __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); |
| __ str(r3, MemOperand(sp, 1 * kPointerSize)); |
| |
| // r1 = parameter count (tagged) |
| // r2 = argument count (tagged) |
| // Compute the mapped parameter count = min(r1, r2) in r1. |
| __ cmp(r1, Operand(r2)); |
| __ mov(r1, Operand(r2), LeaveCC, gt); |
| |
| __ bind(&try_allocate); |
| |
| // Compute the sizes of backing store, parameter map, and arguments object. |
| // 1. Parameter map, has 2 extra words containing context and backing store. |
| const int kParameterMapHeaderSize = |
| FixedArray::kHeaderSize + 2 * kPointerSize; |
| // If there are no mapped parameters, we do not need the parameter_map. |
| __ cmp(r1, Operand(Smi::FromInt(0))); |
| __ mov(r9, Operand::Zero(), LeaveCC, eq); |
| __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne); |
| __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne); |
| |
| // 2. Backing store. |
| __ add(r9, r9, Operand(r2, LSL, 1)); |
| __ add(r9, r9, Operand(FixedArray::kHeaderSize)); |
| |
| // 3. Arguments object. |
| __ add(r9, r9, Operand(Heap::kArgumentsObjectSize)); |
| |
| // Do the allocation of all three objects in one go. |
| __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT); |
| |
| // r0 = address of new object(s) (tagged) |
| // r2 = argument count (tagged) |
| // Get the arguments boilerplate from the current native context into r4. |
| const int kNormalOffset = |
| Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); |
| const int kAliasedOffset = |
| Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); |
| |
| __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); |
| __ cmp(r1, Operand::Zero()); |
| __ ldr(r4, MemOperand(r4, kNormalOffset), eq); |
| __ ldr(r4, MemOperand(r4, kAliasedOffset), ne); |
| |
| // r0 = address of new object (tagged) |
| // r1 = mapped parameter count (tagged) |
| // r2 = argument count (tagged) |
| // r4 = address of boilerplate object (tagged) |
| // Copy the JS object part. |
| for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { |
| __ ldr(r3, FieldMemOperand(r4, i)); |
| __ str(r3, FieldMemOperand(r0, i)); |
| } |
| |
| // Set up the callee in-object property. |
| STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); |
| __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); |
| const int kCalleeOffset = JSObject::kHeaderSize + |
| Heap::kArgumentsCalleeIndex * kPointerSize; |
| __ str(r3, FieldMemOperand(r0, kCalleeOffset)); |
| |
| // Use the length (smi tagged) and set that as an in-object property too. |
| STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
| const int kLengthOffset = JSObject::kHeaderSize + |
| Heap::kArgumentsLengthIndex * kPointerSize; |
| __ str(r2, FieldMemOperand(r0, kLengthOffset)); |
| |
| // Set up the elements pointer in the allocated arguments object. |
| // If we allocated a parameter map, r4 will point there, otherwise |
| // it will point to the backing store. |
| __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); |
| __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); |
| |
| // r0 = address of new object (tagged) |
| // r1 = mapped parameter count (tagged) |
| // r2 = argument count (tagged) |
| // r4 = address of parameter map or backing store (tagged) |
| // Initialize parameter map. If there are no mapped arguments, we're done. |
| Label skip_parameter_map; |
| __ cmp(r1, Operand(Smi::FromInt(0))); |
| // Move backing store address to r3, because it is |
| // expected there when filling in the unmapped arguments. |
| __ mov(r3, r4, LeaveCC, eq); |
| __ b(eq, &skip_parameter_map); |
| |
| __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex); |
| __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset)); |
| __ add(r6, r1, Operand(Smi::FromInt(2))); |
| __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset)); |
| __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize)); |
| __ add(r6, r4, Operand(r1, LSL, 1)); |
| __ add(r6, r6, Operand(kParameterMapHeaderSize)); |
| __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize)); |
| |
| // Copy the parameter slots and the holes in the arguments. |
| // We need to fill in mapped_parameter_count slots. They index the context, |
| // where parameters are stored in reverse order, at |
| // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 |
| // The mapped parameters thus need to get indices |
| // MIN_CONTEXT_SLOTS+parameter_count-1 .. |
| // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count |
| // We loop from right to left. |
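| // For example, with parameter_count == 3 and mapped_parameter_count == 2, |
| // the two mapped slots get the context indices MIN_CONTEXT_SLOTS + 2 and |
| // MIN_CONTEXT_SLOTS + 1. |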
| Label parameters_loop, parameters_test; |
| __ mov(r6, r1); |
| __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); |
| __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); |
| __ sub(r9, r9, Operand(r1)); |
| __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); |
| __ add(r3, r4, Operand(r6, LSL, 1)); |
| __ add(r3, r3, Operand(kParameterMapHeaderSize)); |
| |
| // r6 = loop variable (tagged) |
| // r1 = mapping index (tagged) |
| // r3 = address of backing store (tagged) |
| // r4 = address of parameter map (tagged) |
| // r5 = temporary scratch (a.o., for address calculation) |
| // r7 = the hole value |
| __ jmp(¶meters_test); |
| |
| __ bind(¶meters_loop); |
| __ sub(r6, r6, Operand(Smi::FromInt(1))); |
| __ mov(r5, Operand(r6, LSL, 1)); |
| __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); |
| __ str(r9, MemOperand(r4, r5)); |
| __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); |
| __ str(r7, MemOperand(r3, r5)); |
| __ add(r9, r9, Operand(Smi::FromInt(1))); |
| __ bind(¶meters_test); |
| __ cmp(r6, Operand(Smi::FromInt(0))); |
| __ b(ne, ¶meters_loop); |
| |
| __ bind(&skip_parameter_map); |
| // r2 = argument count (tagged) |
| // r3 = address of backing store (tagged) |
| // r5 = scratch |
| // Copy arguments header and remaining slots (if there are any). |
| __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); |
| __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); |
| __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
| |
| Label arguments_loop, arguments_test; |
| __ mov(r9, r1); |
| __ ldr(r4, MemOperand(sp, 1 * kPointerSize)); |
| __ sub(r4, r4, Operand(r9, LSL, 1)); |
| __ jmp(&arguments_test); |
| |
| __ bind(&arguments_loop); |
| __ sub(r4, r4, Operand(kPointerSize)); |
| __ ldr(r6, MemOperand(r4, 0)); |
| __ add(r5, r3, Operand(r9, LSL, 1)); |
| __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize)); |
| __ add(r9, r9, Operand(Smi::FromInt(1))); |
| |
| __ bind(&arguments_test); |
| __ cmp(r9, Operand(r2)); |
| __ b(lt, &arguments_loop); |
| |
| // Return and remove the on-stack parameters. |
| __ add(sp, sp, Operand(3 * kPointerSize)); |
| __ Ret(); |
| |
| // Do the runtime call to allocate the arguments object. |
| // r2 = argument count (tagged) |
| __ bind(&runtime); |
| __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. |
| __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
| } |
| |
| |
| void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
| // sp[0] : number of parameters |
| // sp[4] : receiver displacement |
| // sp[8] : function |
| // Check if the calling frame is an arguments adaptor frame. |
| Label adaptor_frame, try_allocate, runtime; |
| __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); |
| __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| __ b(eq, &adaptor_frame); |
| |
| // Get the length from the frame. |
| __ ldr(r1, MemOperand(sp, 0)); |
| __ b(&try_allocate); |
| |
| // Patch the arguments.length and the parameters pointer. |
| __ bind(&adaptor_frame); |
| __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| __ str(r1, MemOperand(sp, 0)); |
| __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1)); |
| __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); |
| __ str(r3, MemOperand(sp, 1 * kPointerSize)); |
| |
| // Try the new space allocation. Start out with computing the size |
| // of the arguments object and the elements array in words. |
| Label add_arguments_object; |
| __ bind(&try_allocate); |
| __ SmiUntag(r1, SetCC); |
| __ b(eq, &add_arguments_object); |
| __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); |
| __ bind(&add_arguments_object); |
| __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); |
| |
| // Do the allocation of both objects in one go. |
| __ Allocate(r1, r0, r2, r3, &runtime, |
| static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
| |
| // Get the arguments boilerplate from the current native context. |
| __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); |
| __ ldr(r4, MemOperand(r4, Context::SlotOffset( |
| Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX))); |
| |
| // Copy the JS object part. |
| __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize); |
| |
| // Get the length (smi tagged) and set that as an in-object property too. |
| STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
| __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); |
| __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + |
| Heap::kArgumentsLengthIndex * kPointerSize)); |
| |
| // If there are no actual arguments, we're done. |
| Label done; |
| __ cmp(r1, Operand::Zero()); |
| __ b(eq, &done); |
| |
| // Get the parameters pointer from the stack. |
| __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); |
| |
| // Set up the elements pointer in the allocated arguments object and |
| // initialize the header in the elements fixed array. |
| __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); |
| __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); |
| __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); |
| __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); |
| __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); |
| __ SmiUntag(r1); |
| |
| // Copy the fixed array slots. |
| Label loop; |
| // Set up r4 to point to the first array slot. |
| __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| __ bind(&loop); |
| // Pre-decrement r2 with kPointerSize on each iteration. |
| // Pre-decrement in order to skip receiver. |
| __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); |
| // Post-increment r4 with kPointerSize on each iteration. |
| __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); |
| __ sub(r1, r1, Operand(1)); |
| __ cmp(r1, Operand::Zero()); |
| __ b(ne, &loop); |
| |
| // Return and remove the on-stack parameters. |
| __ bind(&done); |
| __ add(sp, sp, Operand(3 * kPointerSize)); |
| __ Ret(); |
| |
| // Do the runtime call to allocate the arguments object. |
| __ bind(&runtime); |
| __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); |
| } |
| |
| |
| void RegExpExecStub::Generate(MacroAssembler* masm) { |
| // Just jump directly to the runtime if native RegExp is not selected at |
| // compile time, or if entering RegExp code from generated code has been |
| // turned off, either by a runtime switch or at compilation. |
| #ifdef V8_INTERPRETED_REGEXP |
| __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
| #else // V8_INTERPRETED_REGEXP |
| |
| // Stack frame on entry. |
| // sp[0]: last_match_info (expected JSArray) |
| // sp[4]: previous index |
| // sp[8]: subject string |
| // sp[12]: JSRegExp object |
| |
| const int kLastMatchInfoOffset = 0 * kPointerSize; |
| const int kPreviousIndexOffset = 1 * kPointerSize; |
| const int kSubjectOffset = 2 * kPointerSize; |
| const int kJSRegExpOffset = 3 * kPointerSize; |
| |
| Label runtime; |
| // Allocation of registers for this function. These are in callee save |
| // registers and will be preserved by the call to the native RegExp code, as |
| // this code is called using the normal C calling convention. When calling |
| // directly from generated code the native RegExp code will not do a GC and |
| // therefore the contents of these registers are safe to use after the call. |
| Register subject = r4; |
| Register regexp_data = r5; |
| Register last_match_info_elements = r6; |
| |
| // Ensure that a RegExp stack is allocated. |
| Isolate* isolate = masm->isolate(); |
| ExternalReference address_of_regexp_stack_memory_address = |
| ExternalReference::address_of_regexp_stack_memory_address(isolate); |
| ExternalReference address_of_regexp_stack_memory_size = |
| ExternalReference::address_of_regexp_stack_memory_size(isolate); |
| __ mov(r0, Operand(address_of_regexp_stack_memory_size)); |
| __ ldr(r0, MemOperand(r0, 0)); |
| __ cmp(r0, Operand::Zero()); |
| __ b(eq, &runtime); |
| |
| // Check that the first argument is a JSRegExp object. |
| __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); |
| __ JumpIfSmi(r0, &runtime); |
| __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); |
| __ b(ne, &runtime); |
| |
| // Check that the RegExp has been compiled (data contains a fixed array). |
| __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); |
| if (FLAG_debug_code) { |
| __ SmiTst(regexp_data); |
| __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected); |
| __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); |
| __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected); |
| } |
| |
| // regexp_data: RegExp data (FixedArray) |
| // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. |
| __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); |
| __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); |
| __ b(ne, &runtime); |
| |
| // regexp_data: RegExp data (FixedArray) |
| // Check that the number of captures fits in the static offsets vector buffer. |
| __ ldr(r2, |
| FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
| // Check (number_of_captures + 1) * 2 <= offsets vector size |
| // Or number_of_captures * 2 <= offsets vector size - 2 |
| // Multiplying by 2 comes for free since r2 is smi-tagged. |
| STATIC_ASSERT(kSmiTag == 0); |
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
| STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); |
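| // For example, 2 captures arrive in r2 as the smi 4, and the check |
| // 4 <= vector size - 2 is exactly (2 + 1) * 2 <= vector size. |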
| __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2)); |
| __ b(hi, &runtime); |
| |
| // Reset offset for possibly sliced string. |
| __ mov(r9, Operand::Zero()); |
| __ ldr(subject, MemOperand(sp, kSubjectOffset)); |
| __ JumpIfSmi(subject, &runtime); |
| __ mov(r3, subject); // Make a copy of the original subject string. |
| __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); |
| // subject: subject string |
| // r3: subject string |
| // r0: subject string instance type |
| // regexp_data: RegExp data (FixedArray) |
| // Handle subject string according to its encoding and representation: |
| // (1) Sequential string? If yes, go to (5). |
| // (2) Anything but sequential or cons? If yes, go to (6). |
| // (3) Cons string. If the string is flat, replace subject with first string. |
| // Otherwise bailout. |
| // (4) Is subject external? If yes, go to (7). |
| // (5) Sequential string. Load regexp code according to encoding. |
| // (E) Carry on. |
| /// [...] |
| |
| // Deferred code at the end of the stub: |
| // (6) Not a long external string? If yes, go to (8). |
| // (7) External string. Make it, offset-wise, look like a sequential string. |
| // Go to (5). |
| // (8) Short external string or not a string? If yes, bail out to runtime. |
| // (9) Sliced string. Replace subject with parent. Go to (4). |
| |
| Label seq_string /* 5 */, external_string /* 7 */, |
| check_underlying /* 4 */, not_seq_nor_cons /* 6 */, |
| not_long_external /* 8 */; |
| |
| // (1) Sequential string? If yes, go to (5). |
| __ and_(r1, |
| r0, |
| Operand(kIsNotStringMask | |
| kStringRepresentationMask | |
| kShortExternalStringMask), |
| SetCC); |
| STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); |
| __ b(eq, &seq_string); // Go to (5). |
| |
| // (2) Anything but sequential or cons? If yes, go to (6). |
| STATIC_ASSERT(kConsStringTag < kExternalStringTag); |
| STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); |
| STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); |
| STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); |
| __ cmp(r1, Operand(kExternalStringTag)); |
| __ b(ge, ¬_seq_nor_cons); // Go to (6). |
| |
| // (3) Cons string. Check that it's flat. |
| // Replace subject with first string and reload instance type. |
| __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); |
| __ CompareRoot(r0, Heap::kempty_stringRootIndex); |
| __ b(ne, &runtime); |
| __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); |
| |
| // (4) Is subject external? If yes, go to (7). |
| __ bind(&check_underlying); |
| __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); |
| STATIC_ASSERT(kSeqStringTag == 0); |
| __ tst(r0, Operand(kStringRepresentationMask)); |
| // The underlying external string is never a short external string. |
| STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); |
| STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); |
| __ b(ne, &external_string); // Go to (7). |
| |
| // (5) Sequential string. Load regexp code according to encoding. |
| __ bind(&seq_string); |
| // subject: sequential subject string (or look-alike, external string) |
| // r3: original subject string |
| // Load previous index and check range before r3 is overwritten. We have to |
| // use r3 instead of subject here because subject might have been only made |
| // to look like a sequential string when it actually is an external string. |
| __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); |
| __ JumpIfNotSmi(r1, &runtime); |
| __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); |
| __ cmp(r3, Operand(r1)); |
| __ b(ls, &runtime); |
| __ SmiUntag(r1); |
| |
| STATIC_ASSERT(4 == kOneByteStringTag); |
| STATIC_ASSERT(kTwoByteStringTag == 0); |
| __ and_(r0, r0, Operand(kStringEncodingMask)); |
| __ mov(r3, Operand(r0, ASR, 2), SetCC); |
| __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); |
| __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); |
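| // r3 is now 1 for one-byte (ASCII) subjects and 0 for two-byte ones |
| // (kOneByteStringTag is bit 2, hence the ASR #2), selecting the matching |
| // code object above. |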
| |
| // (E) Carry on. String handling is done. |
| // r7: irregexp code |
| // Check that the irregexp code has been generated for the actual string |
| // encoding. If it has, the field contains a code object; otherwise it |
| // contains a smi (code flushing support). |
| __ JumpIfSmi(r7, &runtime); |
| |
| // r1: previous index |
| // r3: encoding of subject string (1 if ASCII, 0 if two-byte) |
| // r7: code |
| // subject: Subject string |
| // regexp_data: RegExp data (FixedArray) |
| // All checks done. Now push arguments for native regexp code. |
| __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); |
| |
| // Isolates: note we add an additional parameter here (isolate pointer). |
| const int kRegExpExecuteArguments = 9; |
| const int kParameterRegisters = 4; |
| __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); |
| |
| // Stack pointer now points to cell where return address is to be written. |
| // Arguments are before that on the stack or in registers. |
| |
| // Argument 9 (sp[20]): Pass current isolate address. |
| __ mov(r0, Operand(ExternalReference::isolate_address(isolate))); |
| __ str(r0, MemOperand(sp, 5 * kPointerSize)); |
| |
| // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. |
| __ mov(r0, Operand(1)); |
| __ str(r0, MemOperand(sp, 4 * kPointerSize)); |
| |
| // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. |
| __ mov(r0, Operand(address_of_regexp_stack_memory_address)); |
| __ ldr(r0, MemOperand(r0, 0)); |
| __ mov(r2, Operand(address_of_regexp_stack_memory_size)); |
| __ ldr(r2, MemOperand(r2, 0)); |
| __ add(r0, r0, Operand(r2)); |
| __ str(r0, MemOperand(sp, 3 * kPointerSize)); |
| |
| // Argument 6: Set the number of capture registers to zero to force global |
| // regexps to behave as non-global. This does not affect non-global regexps. |
| __ mov(r0, Operand::Zero()); |
| __ str(r0, MemOperand(sp, 2 * kPointerSize)); |
| |
| // Argument 5 (sp[4]): static offsets vector buffer. |
| __ mov(r0, |
| Operand(ExternalReference::address_of_static_offsets_vector(isolate))); |
| __ str(r0, MemOperand(sp, 1 * kPointerSize)); |
| |
| // For arguments 4 and 3 get string length, calculate start of string data and |
| // calculate the shift of the index (0 for ASCII and 1 for two byte). |
| __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); |
| __ eor(r3, r3, Operand(1)); |
| // Load the length from the original subject string from the previous stack |
| // frame. Therefore we have to use fp, which points exactly to two pointer |
| // sizes below the previous sp. (Because creating a new stack frame pushes |
| // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) |
| __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); |
| // If slice offset is not 0, load the length from the original sliced string. |
| // Argument 4, r3: End of string data |
| // Argument 3, r2: Start of string data |
| // Prepare start and end index of the input. |
| __ add(r9, r8, Operand(r9, LSL, r3)); |
| __ add(r2, r9, Operand(r1, LSL, r3)); |
| |
| __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset)); |
| __ SmiUntag(r8); |
| __ add(r3, r9, Operand(r8, LSL, r3)); |
| |
| // Argument 2 (r1): Previous index. |
| // Already there |
| |
| // Argument 1 (r0): Subject string. |
| __ mov(r0, subject); |
| |
| // Locate the code entry and call it. |
| __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| DirectCEntryStub stub; |
| stub.GenerateCall(masm, r7); |
| |
| __ LeaveExitFrame(false, no_reg); |
| |
| // r0: result |
| // subject: subject string (callee saved) |
| // regexp_data: RegExp data (callee saved) |
| // last_match_info_elements: Last match info elements (callee saved) |
| // Check the result. |
| Label success; |
| __ cmp(r0, Operand(1)); |
| // We expect exactly one result since we force the called regexp to behave |
| // as non-global. |
| __ b(eq, &success); |
| Label failure; |
| __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); |
| __ b(eq, &failure); |
| __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); |
| // If not exception it can only be retry. Handle that in the runtime system. |
| __ b(ne, &runtime); |
| // The result must now be exception. If there is no pending exception, a |
| // stack overflow (on the backtrack stack) was detected in the RegExp code |
| // but the exception has not been created yet. Handle that in the runtime |
| // system. |
| // TODO(592): Rerunning the RegExp to get the stack overflow exception. |
| __ mov(r1, Operand(isolate->factory()->the_hole_value())); |
| __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| isolate))); |
| __ ldr(r0, MemOperand(r2, 0)); |
| __ cmp(r0, r1); |
| __ b(eq, &runtime); |
| |
| __ str(r1, MemOperand(r2, 0)); // Clear pending exception. |
| |
| // Check if the exception is a termination. If so, throw as uncatchable. |
| __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex); |
| |
| Label termination_exception; |
| __ b(eq, &termination_exception); |
| |
| __ Throw(r0); |
| |
| __ bind(&termination_exception); |
| __ ThrowUncatchable(r0); |
| |
| __ bind(&failure); |
| // For failure and exception return null. |
| __ mov(r0, Operand(masm->isolate()->factory()->null_value())); |
| __ add(sp, sp, Operand(4 * kPointerSize)); |
| __ Ret(); |
| |
| // Process the result from the native regexp code. |
| __ bind(&success); |
| __ ldr(r1, |
| FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
| // Calculate number of capture registers (number_of_captures + 1) * 2. |
| // Multiplying by 2 comes for free since r1 is smi-tagged. |
| STATIC_ASSERT(kSmiTag == 0); |
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
| __ add(r1, r1, Operand(2)); // r1 was a smi. |
| |
| __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); |
| __ JumpIfSmi(r0, &runtime); |
| __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE); |
| __ b(ne, &runtime); |
| // Check that the JSArray is in fast case. |
| __ ldr(last_match_info_elements, |
| FieldMemOperand(r0, JSArray::kElementsOffset)); |
| __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); |
| __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); |
| __ b(ne, &runtime); |
| // Check that the last match info has space for the capture registers and the |
| // additional information. |
| __ ldr(r0, |
| FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); |
| __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead)); |
| __ cmp(r2, Operand::SmiUntag(r0)); |
| __ b(gt, &runtime); |
| |
| // r1: number of capture registers |
| // r4: subject string |
| // Store the capture count. |
| __ SmiTag(r2, r1); |
| __ str(r2, FieldMemOperand(last_match_info_elements, |
| RegExpImpl::kLastCaptureCountOffset)); |
| // Store last subject and last input. |
| __ str(subject, |
| FieldMemOperand(last_match_info_elements, |
| RegExpImpl::kLastSubjectOffset)); |
| __ mov(r2, subject); |
| __ RecordWriteField(last_match_info_elements, |
| RegExpImpl::kLastSubjectOffset, |
| subject, |
| r7, |
| kLRHasNotBeenSaved, |
| kDontSaveFPRegs); |
| __ mov(subject, r2); |
| __ str(subject, |
| FieldMemOperand(last_match_info_elements, |
| RegExpImpl::kLastInputOffset)); |
| __ RecordWriteField(last_match_info_elements, |
| RegExpImpl::kLastInputOffset, |
| subject, |
| r7, |
| kLRHasNotBeenSaved, |
| kDontSaveFPRegs); |
| |
| // Get the static offsets vector filled by the native regexp code. |
| ExternalReference address_of_static_offsets_vector = |
| ExternalReference::address_of_static_offsets_vector(isolate); |
| __ mov(r2, Operand(address_of_static_offsets_vector)); |
| |
| // r1: number of capture registers |
| // r2: offsets vector |
| Label next_capture, done; |
| // Capture register counter starts from number of capture registers and |
| // counts down until wrapping after zero. |
| __ add(r0, |
| last_match_info_elements, |
| Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); |
| __ bind(&next_capture); |
| __ sub(r1, r1, Operand(1), SetCC); |
| __ b(mi, &done); |
| // Read the value from the static offsets vector buffer. |
| __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); |
| // Store the smi value in the last match info. |
| __ SmiTag(r3); |
| __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); |
| __ jmp(&next_capture); |
| __ bind(&done); |
| |
| // Return last match info. |
| __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); |
| __ add(sp, sp, Operand(4 * kPointerSize)); |
| __ Ret(); |
| |
| // Do the runtime call to execute the regexp. |
| __ bind(&runtime); |
| __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
| |
| // Deferred code for string handling. |
| // (6) Not a long external string? If yes, go to (8). |
| __ bind(¬_seq_nor_cons); |
| // Compare flags are still set. |
| __ b(gt, ¬_long_external); // Go to (8). |
| |
| // (7) External string. Make it, offset-wise, look like a sequential string. |
| __ bind(&external_string); |
| __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); |
| if (FLAG_debug_code) { |
| // Assert that we do not have a cons or slice (indirect strings) here. |
| // Sequential strings have already been ruled out. |
| __ tst(r0, Operand(kIsIndirectStringMask)); |
| __ Assert(eq, kExternalStringExpectedButNotFound); |
| } |
| __ ldr(subject, |
| FieldMemOperand(subject, ExternalString::kResourceDataOffset)); |
| // Move the pointer so that offset-wise, it looks like a sequential string. |
| STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); |
| __ sub(subject, |
| subject, |
| Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| __ jmp(&seq_string); // Go to (5). |
| |
| // (8) Short external string or not a string? If yes, bail out to runtime. |
| __ bind(¬_long_external); |
| STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0); |
| __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask)); |
| __ b(ne, &runtime); |
| |
| // (9) Sliced string. Replace subject with parent. Go to (4). |
| // Load offset into r9 and replace subject string with parent. |
| __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); |
| __ SmiUntag(r9); |
| __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); |
| __ jmp(&check_underlying); // Go to (4). |
| #endif // V8_INTERPRETED_REGEXP |
| } |
| |
| |
| void RegExpConstructResultStub::Generate(MacroAssembler* masm) { |
| const int kMaxInlineLength = 100; |
| Label slowcase; |
| Label done; |
| Factory* factory = masm->isolate()->factory(); |
| |
| __ ldr(r1, MemOperand(sp, kPointerSize * 2)); |
| STATIC_ASSERT(kSmiTag == 0); |
| STATIC_ASSERT(kSmiTagSize == 1); |
| __ JumpIfNotSmi(r1, &slowcase); |
| __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength))); |
| __ b(hi, &slowcase); |
| // Smi-tagging is equivalent to multiplying by 2. |
| // Allocate RegExpResult followed by FixedArray with the size in r2. |
| // JSArray: [Map][empty properties][Elements][Length-smi][index][input] |
| // Elements: [Map][Length][..elements..] |
| // Size of JSArray with two in-object properties and the header of a |
| // FixedArray. |
| int objects_size = |
| (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; |
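| // For example, a result length of 3 arrives in r1 as the smi 6; SmiUntag |
| // recovers 3 in r5, and the allocation size in words becomes |
| // 3 + objects_size. |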
| __ SmiUntag(r5, r1); |
| __ add(r2, r5, Operand(objects_size)); |
| __ Allocate( |
| r2, // In: Size, in words. |
| r0, // Out: Start of allocation (tagged). |
| r3, // Scratch register. |
| r4, // Scratch register. |
| &slowcase, |
| static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
| // r0: Start of allocated area, object-tagged. |
| // r1: Number of elements in array, as smi. |
| // r5: Number of elements, untagged. |
| |
| // Set JSArray map to global.regexp_result_map(). |
| // Set empty properties FixedArray. |
| // Set elements to point to FixedArray allocated right after the JSArray. |
| // Interleave operations for better latency. |
| __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
| __ add(r3, r0, Operand(JSRegExpResult::kSize)); |
| __ mov(r4, Operand(factory->empty_fixed_array())); |
| __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset)); |
| __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); |
| __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); |
| __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); |
| __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| |
| // Set input, index and length fields from arguments. |
| __ ldr(r1, MemOperand(sp, kPointerSize * 0)); |
| __ ldr(r2, MemOperand(sp, kPointerSize * 1)); |
| __ ldr(r6, MemOperand(sp, kPointerSize * 2)); |
| __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); |
| __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); |
| __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset)); |
| |
| // Fill out the elements FixedArray. |
| // r0: JSArray, tagged. |
| // r3: FixedArray, tagged. |
| // r5: Number of elements in array, untagged. |
| |
| // Set map. |
| __ mov(r2, Operand(factory->fixed_array_map())); |
| __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); |
| // Set FixedArray length. |
| __ SmiTag(r6, r5); |
| __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
| // Fill contents of fixed-array with undefined. |
| __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
| __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| // Fill fixed array elements with undefined. |
| // r0: JSArray, tagged. |
| // r2: undefined. |
| // r3: Start of elements in FixedArray. |
| // r5: Number of elements to fill. |
| Label loop; |
| __ cmp(r5, Operand::Zero()); |
| __ bind(&loop); |
| __ b(le, &done); // Jump if r5 is negative or zero. |
| __ sub(r5, r5, Operand(1), SetCC); |
| __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)); |
| __ jmp(&loop); |
| |
| __ bind(&done); |
| __ add(sp, sp, Operand(3 * kPointerSize)); |
| __ Ret(); |
| |
| __ bind(&slowcase); |
| __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); |
| } |
| |
| |
| static void GenerateRecordCallTarget(MacroAssembler* masm) { |
| // Cache the called function in a global property cell. Cache states |
| // are uninitialized, monomorphic (indicated by a JSFunction), and |
| // megamorphic. |
| // r1 : the function to call |
| // r2 : cache cell for call target |
| Label initialize, done, miss, megamorphic, not_array_function; |
| |
| ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), |
| masm->isolate()->heap()->undefined_value()); |
| ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), |
| masm->isolate()->heap()->the_hole_value()); |
| |
| // Load the cache state into r3. |
| __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset)); |
| |
| // A monomorphic cache hit or an already megamorphic state: invoke the |
| // function without changing the state. |
| __ cmp(r3, r1); |
| __ b(eq, &done); |
| |
| // If we came here, we need to see if we are the array function. |
| // If we didn't have a matching function, and we didn't find the |
| // megamorphic sentinel, then we have in the cell either some other |
| // function or an AllocationSite. Do a map check on the object in r3. |
| Handle<Map> allocation_site_map( |
| masm->isolate()->heap()->allocation_site_map(), |
| masm->isolate()); |
| __ ldr(r5, FieldMemOperand(r3, 0)); |
| __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); |
| __ b(ne, &miss); |
| |
| // Make sure the function is the Array() function |
| __ LoadArrayFunction(r3); |
| __ cmp(r1, r3); |
| __ b(ne, &megamorphic); |
| __ jmp(&done); |
| |
| __ bind(&miss); |
| |
| // A monomorphic miss (i.e., here the cache is not uninitialized) goes |
| // megamorphic. |
| __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); |
| __ b(eq, &initialize); |
| // MegamorphicSentinel is an immortal immovable object (undefined) so no |
| // write-barrier is needed. |
| __ bind(&megamorphic); |
| __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| __ str(ip, FieldMemOperand(r2, Cell::kValueOffset)); |
| __ jmp(&done); |
| |
| // An uninitialized cache is patched with the function or sentinel to |
| // indicate the ElementsKind if function is the Array constructor. |
| __ bind(&initialize); |
| // Make sure the function is the Array() function |
| __ LoadArrayFunction(r3); |
| __ cmp(r1, r3); |
| __ b(ne, ¬_array_function); |
| |
| // The target function is the Array constructor. Create an AllocationSite |
| // if we don't already have one, and store it in the cell. |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| |
| __ SmiTag(r0); |
| __ push(r0); |
| __ push(r1); |
| __ push(r2); |
| |
| CreateAllocationSiteStub create_stub; |
| __ CallStub(&create_stub); |
| |
| __ pop(r2); |
| __ pop(r1); |
| __ pop(r0); |
| __ SmiUntag(r0); |
| } |
| __ b(&done); |
| |
| __ bind(¬_array_function); |
| __ str(r1, FieldMemOperand(r2, Cell::kValueOffset)); |
| // No need for a write barrier here - cells are rescanned. |
| |
| __ bind(&done); |
| } |
| |
| |
| void CallFunctionStub::Generate(MacroAssembler* masm) { |
| // r1 : the function to call |
| // r2 : cache cell for call target |
| Label slow, non_function; |
| |
| // The receiver might implicitly be the global object. This is |
| // indicated by passing the hole as the receiver to the call |
| // function stub. |
| if (ReceiverMightBeImplicit()) { |
| Label call; |
| // Get the receiver from the stack. |
| // function, receiver [, arguments] |
| __ ldr(r4, MemOperand(sp, argc_ * kPointerSize)); |
| // Call as function is indicated with the hole. |
| __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); |
| __ b(ne, &call); |
| // Patch the receiver on the stack with the global receiver object. |
| __ ldr(r3, |
| MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset)); |
| __ str(r3, MemOperand(sp, argc_ * kPointerSize)); |
| __ bind(&call); |
| } |
| |
| // Check that the function is really a JavaScript function. |
| // r1: pushed function (to be verified) |
| __ JumpIfSmi(r1, &non_function); |
| // Get the map of the function object. |
| __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); |
| __ b(ne, &slow); |
| |
| if (RecordCallTarget()) { |
| GenerateRecordCallTarget(masm); |
| } |
| |
| // Fast-case: Invoke the function now. |
| // r1: pushed function |
| ParameterCount actual(argc_); |
| |
| if (ReceiverMightBeImplicit()) { |
| Label call_as_function; |
| __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); |
| __ b(eq, &call_as_function); |
| __ InvokeFunction(r1, |
| actual, |
| JUMP_FUNCTION, |
| NullCallWrapper(), |
| CALL_AS_METHOD); |
| __ bind(&call_as_function); |
| } |
| __ InvokeFunction(r1, |
| actual, |
| JUMP_FUNCTION, |
| NullCallWrapper(), |
| CALL_AS_FUNCTION); |
| |
| // Slow-case: Non-function called. |
| __ bind(&slow); |
| if (RecordCallTarget()) { |
| // If there is a call target cache, mark it megamorphic in the |
| // non-function case. MegamorphicSentinel is an immortal immovable |
| // object (undefined) so no write barrier is needed. |
| ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), |
| masm->isolate()->heap()->undefined_value()); |
| __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| __ str(ip, FieldMemOperand(r2, Cell::kValueOffset)); |
| } |
| // Check for function proxy. |
| __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); |
| __ b(ne, &non_function); |
| __ push(r1); // put proxy as additional argument |
| __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32)); |
| __ mov(r2, Operand::Zero()); |
| __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); |
| __ SetCallKind(r5, CALL_AS_METHOD); |
| { |
| Handle<Code> adaptor = |
| masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
| __ Jump(adaptor, RelocInfo::CODE_TARGET); |
| } |
| |
| // CALL_NON_FUNCTION expects the non-function callee as receiver (instead |
| // of the original receiver from the call site). |
| __ bind(&non_function); |
| __ str(r1, MemOperand(sp, argc_ * kPointerSize)); |
| __ mov(r0, Operand(argc_)); // Set up the number of arguments. |
| __ mov(r2, Operand::Zero()); |
| __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); |
| __ SetCallKind(r5, CALL_AS_METHOD); |
| __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| RelocInfo::CODE_TARGET); |
| } |
| |
| |
| void CallConstructStub::Generate(MacroAssembler* masm) { |
| // r0 : number of arguments |
| // r1 : the function to call |
| // r2 : cache cell for call target |
| Label slow, non_function_call; |
| |
| // Check that the function is not a smi. |
| __ JumpIfSmi(r1, &non_function_call); |
| // Check that the function is a JSFunction. |
| __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); |
| __ b(ne, &slow); |
| |
| if (RecordCallTarget()) { |
| GenerateRecordCallTarget(masm); |
| } |
| |
| // Jump to the function-specific construct stub. |
| Register jmp_reg = r3; |
| __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); |
| __ ldr(jmp_reg, FieldMemOperand(jmp_reg, |
| SharedFunctionInfo::kConstructStubOffset)); |
| __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| |
| // r0: number of arguments |
| // r1: called object |
| // r3: object type |
| Label do_call; |
| __ bind(&slow); |
| __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); |
| __ b(ne, &non_function_call); |
| __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); |
| __ jmp(&do_call); |
| |
| __ bind(&non_function_call); |
| __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); |
| __ bind(&do_call); |
| // Set expected number of arguments to zero (not changing r0). |
| __ mov(r2, Operand::Zero()); |
| __ SetCallKind(r5, CALL_AS_METHOD); |
| __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| RelocInfo::CODE_TARGET); |
| } |
| |
| |
| // StringCharCodeAtGenerator |
| void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| Label flat_string; |
| Label ascii_string; |
| Label got_char_code; |
| Label sliced_string; |
| |
| // If the receiver is a smi trigger the non-string case. |
| __ JumpIfSmi(object_, receiver_not_string_); |
| |
| // Fetch the instance type of the receiver into result register. |
| __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| // If the receiver is not a string trigger the non-string case. |
| __ tst(result_, Operand(kIsNotStringMask)); |
| __ b(ne, receiver_not_string_); |
| |
| // If the index is non-smi trigger the non-smi case. |
| __ JumpIfNotSmi(index_, &index_not_smi_); |
| __ bind(&got_smi_index_); |
| |
| // Check for index out of range. |
| __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); |
| __ cmp(ip, Operand(index_)); |
| __ b(ls, index_out_of_range_); |
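| // Both the length (in ip) and the index are smis, so the unsigned |
| // comparison above also rejects negative indices. |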
| |
| __ SmiUntag(index_); |
| |
| StringCharLoadGenerator::Generate(masm, |
| object_, |
| index_, |
| result_, |
| &call_runtime_); |
| |
| __ SmiTag(result_); |
| __ bind(&exit_); |
| } |
| |
| |
| void StringCharCodeAtGenerator::GenerateSlow( |
| MacroAssembler* masm, |
| const RuntimeCallHelper& call_helper) { |
| __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); |
| |
| // Index is not a smi. |
| __ bind(&index_not_smi_); |
| // If index is a heap number, try converting it to an integer. |
| __ CheckMap(index_, |
| result_, |
| Heap::kHeapNumberMapRootIndex, |
| index_not_number_, |
| DONT_DO_SMI_CHECK); |
| call_helper.BeforeCall(masm); |
| __ push(object_); |
| __ push(index_); // Consumed by runtime conversion function. |
| if (index_flags_ == STRING_INDEX_IS_NUMBER) { |
| __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); |
| } else { |
| ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); |
| // NumberToSmi discards numbers that are not exact integers. |
| __ CallRuntime(Runtime::kNumberToSmi, 1); |
| } |
| // Save the conversion result before the pop instructions below |
| // have a chance to overwrite it. |
| __ Move(index_, r0); |
| __ pop(object_); |
| // Reload the instance type. |
| __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| call_helper.AfterCall(masm); |
| // If index is still not a smi, it must be out of range. |
| __ JumpIfNotSmi(index_, index_out_of_range_); |
| // Otherwise, return to the fast path. |
| __ jmp(&got_smi_index_); |
| |
| // Call runtime. We get here when the receiver is a string and the |
| // index is a number, but the code for getting the actual character |
| // is too complex (e.g., when the string needs to be flattened). |
| __ bind(&call_runtime_); |
| call_helper.BeforeCall(masm); |
| __ SmiTag(index_); |
| __ Push(object_, index_); |
| __ CallRuntime(Runtime::kStringCharCodeAt, 2); |
| __ Move(result_, r0); |
| call_helper.AfterCall(masm); |
| __ jmp(&exit_); |
| |
| __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); |
| } |
| |
| |
| // ------------------------------------------------------------------------- |
| // StringCharFromCodeGenerator |
| |
| void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
| // Fast case of Heap::LookupSingleCharacterStringFromCode. |
| STATIC_ASSERT(kSmiTag == 0); |
| STATIC_ASSERT(kSmiShiftSize == 0); |
| ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); |
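| // A single test catches both cases: code_ not being a smi, and the |
| // untagged char code exceeding String::kMaxOneByteCharCode. |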
| __ tst(code_, |
| Operand(kSmiTagMask | |
| ((~String::kMaxOneByteCharCode) << kSmiTagSize))); |
| __ b(ne, &slow_case_); |
| |
| __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
| // At this point code register contains smi tagged ASCII char code. |
| __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_)); |
| __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
| __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); |
| __ b(eq, &slow_case_); |
| __ bind(&exit_); |
| } |
| |
| |
| void StringCharFromCodeGenerator::GenerateSlow( |
| MacroAssembler* masm, |
| const RuntimeCallHelper& call_helper) { |
| __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); |
| |
| __ bind(&slow_case_); |
| call_helper.BeforeCall(masm); |
| __ push(code_); |
| __ CallRuntime(Runtime::kCharFromCode, 1); |
| __ Move(result_, r0); |
| call_helper.AfterCall(masm); |
| __ jmp(&exit_); |
| |
| __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); |
| } |
| |
| |
| void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, |
| Register dest, |
| Register src, |
| Register count, |
| Register scratch, |
| bool ascii) { |
| Label loop; |
| Label done; |
| // This loop just copies one character at a time, as it is only used for very |
| // short strings. |
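| // count is in characters; for two-byte strings the add below doubles it to |
| // get the byte count (and also sets the flags for the zero check). |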
| if (!ascii) { |
| __ add(count, count, Operand(count), SetCC); |
| } else { |
| __ cmp(count, Operand::Zero()); |
| } |
| __ b(eq, &done); |
| |
| __ bind(&loop); |
| __ ldrb(scratch, MemOperand(src, 1, PostIndex)); |
// Perform the sub between the load and the dependent store to give the
// load time to complete.
| __ sub(count, count, Operand(1), SetCC); |
| __ strb(scratch, MemOperand(dest, 1, PostIndex)); |
// Branch back unless this was the last iteration.
| __ b(gt, &loop); |
| |
| __ bind(&done); |
| } |
| |
| |
| enum CopyCharactersFlags { |
| COPY_ASCII = 1, |
| DEST_ALWAYS_ALIGNED = 2 |
| }; |
| |
| |
| void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, |
| Register dest, |
| Register src, |
| Register count, |
| Register scratch1, |
| Register scratch2, |
| Register scratch3, |
| Register scratch4, |
| Register scratch5, |
| int flags) { |
| bool ascii = (flags & COPY_ASCII) != 0; |
| bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; |
| |
| if (dest_always_aligned && FLAG_debug_code) { |
| // Check that destination is actually word aligned if the flag says |
| // that it is. |
| __ tst(dest, Operand(kPointerAlignmentMask)); |
| __ Check(eq, kDestinationOfCopyNotAligned); |
| } |
| |
| const int kReadAlignment = 4; |
| const int kReadAlignmentMask = kReadAlignment - 1; |
| // Ensure that reading an entire aligned word containing the last character |
| // of a string will not read outside the allocated area (because we pad up |
| // to kObjectAlignment). |
| STATIC_ASSERT(kObjectAlignment >= kReadAlignment); |
| // Assumes word reads and writes are little endian. |
| // Nothing to do for zero characters. |
| Label done; |
| if (!ascii) { |
| __ add(count, count, Operand(count), SetCC); |
| } else { |
| __ cmp(count, Operand::Zero()); |
| } |
| __ b(eq, &done); |
| |
| // Assume that you cannot read (or write) unaligned. |
| Label byte_loop; |
| // Must copy at least eight bytes, otherwise just do it one byte at a time. |
| __ cmp(count, Operand(8)); |
| __ add(count, dest, Operand(count)); |
| Register limit = count; // Read until src equals this. |
| __ b(lt, &byte_loop); |
| |
| if (!dest_always_aligned) { |
| // Align dest by byte copying. Copies between zero and three bytes. |
| __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC); |
| Label dest_aligned; |
| __ b(eq, &dest_aligned); |
| __ cmp(scratch4, Operand(2)); |
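// dest is misaligned by scratch4 (1..3) bytes, so copy 4 - scratch4 bytes:
// one unconditionally, a second if scratch4 <= 2 (le), and a third if
// scratch4 == 1 (lt).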
| __ ldrb(scratch1, MemOperand(src, 1, PostIndex)); |
| __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le); |
| __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt); |
| __ strb(scratch1, MemOperand(dest, 1, PostIndex)); |
| __ strb(scratch2, MemOperand(dest, 1, PostIndex), le); |
| __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt); |
| __ bind(&dest_aligned); |
| } |
| |
| Label simple_loop; |
| |
| __ sub(scratch4, dest, Operand(src)); |
| __ and_(scratch4, scratch4, Operand(0x03), SetCC); |
| __ b(eq, &simple_loop); |
| // Shift register is number of bits in a source word that |
| // must be combined with bits in the next source word in order |
| // to create a destination word. |
| |
| // Complex loop for src/dst that are not aligned the same way. |
| { |
| Label loop; |
| __ mov(scratch4, Operand(scratch4, LSL, 3)); |
| Register left_shift = scratch4; |
| __ and_(src, src, Operand(~3)); // Round down to load previous word. |
| __ ldr(scratch1, MemOperand(src, 4, PostIndex)); |
// Store the "shift" most significant bits of scratch in the least
// significant bits (i.e., shift down by (32-shift)).
| __ rsb(scratch2, left_shift, Operand(32)); |
| Register right_shift = scratch2; |
| __ mov(scratch1, Operand(scratch1, LSR, right_shift)); |
| |
| __ bind(&loop); |
| __ ldr(scratch3, MemOperand(src, 4, PostIndex)); |
| __ sub(scratch5, limit, Operand(dest)); |
| __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); |
| __ str(scratch1, MemOperand(dest, 4, PostIndex)); |
| __ mov(scratch1, Operand(scratch3, LSR, right_shift)); |
| // Loop if four or more bytes left to copy. |
| // Compare to eight, because we did the subtract before increasing dst. |
| __ sub(scratch5, scratch5, Operand(8), SetCC); |
| __ b(ge, &loop); |
| } |
| // There is now between zero and three bytes left to copy (negative that |
| // number is in scratch5), and between one and three bytes already read into |
| // scratch1 (eight times that number in scratch4). We may have read past |
| // the end of the string, but because objects are aligned, we have not read |
| // past the end of the object. |
| // Find the minimum of remaining characters to move and preloaded characters |
| // and write those as bytes. |
| __ add(scratch5, scratch5, Operand(4), SetCC); |
| __ b(eq, &done); |
| __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); |
| // Move minimum of bytes read and bytes left to copy to scratch4. |
| __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); |
| // Between one and three (value in scratch5) characters already read into |
| // scratch ready to write. |
| __ cmp(scratch5, Operand(2)); |
| __ strb(scratch1, MemOperand(dest, 1, PostIndex)); |
| __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); |
| __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); |
| __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); |
| __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); |
| // Copy any remaining bytes. |
| __ b(&byte_loop); |
| |
| // Simple loop. |
| // Copy words from src to dst, until less than four bytes left. |
| // Both src and dest are word aligned. |
| __ bind(&simple_loop); |
| { |
| Label loop; |
| __ bind(&loop); |
| __ ldr(scratch1, MemOperand(src, 4, PostIndex)); |
| __ sub(scratch3, limit, Operand(dest)); |
| __ str(scratch1, MemOperand(dest, 4, PostIndex)); |
// Compare to 8, not 4, because we do the subtraction before increasing
| // dest. |
| __ cmp(scratch3, Operand(8)); |
| __ b(ge, &loop); |
| } |
| |
| // Copy bytes from src to dst until dst hits limit. |
| __ bind(&byte_loop); |
| __ cmp(dest, Operand(limit)); |
| __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt); |
| __ b(ge, &done); |
| __ strb(scratch1, MemOperand(dest, 1, PostIndex)); |
| __ b(&byte_loop); |
| |
| __ bind(&done); |
| } |
| |
| |
| void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, |
| Register c1, |
| Register c2, |
| Register scratch1, |
| Register scratch2, |
| Register scratch3, |
| Register scratch4, |
| Register scratch5, |
| Label* not_found) { |
| // Register scratch3 is the general scratch register in this function. |
| Register scratch = scratch3; |
| |
// Make sure that both characters are not digits, as such strings have a
| // different hash algorithm. Don't try to look for these in the string table. |
| Label not_array_index; |
| __ sub(scratch, c1, Operand(static_cast<int>('0'))); |
| __ cmp(scratch, Operand(static_cast<int>('9' - '0'))); |
| __ b(hi, ¬_array_index); |
| __ sub(scratch, c2, Operand(static_cast<int>('0'))); |
| __ cmp(scratch, Operand(static_cast<int>('9' - '0'))); |
| |
// If the check failed, combine both characters into a single halfword.
// This is required by the contract of the method: code at the
// not_found branch expects this combination in the c1 register.
| __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls); |
| __ b(ls, not_found); |
| |
| __ bind(¬_array_index); |
| // Calculate the two character string hash. |
| Register hash = scratch1; |
| StringHelper::GenerateHashInit(masm, hash, c1); |
| StringHelper::GenerateHashAddCharacter(masm, hash, c2); |
| StringHelper::GenerateHashGetHash(masm, hash); |
| |
| // Collect the two characters in a register. |
| Register chars = c1; |
| __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte)); |
| |
| // chars: two character string, char 1 in byte 0 and char 2 in byte 1. |
| // hash: hash of two character string. |
| |
// Load the string table.
| Register string_table = c2; |
| __ LoadRoot(string_table, Heap::kStringTableRootIndex); |
| |
| Register undefined = scratch4; |
| __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| |
| // Calculate capacity mask from the string table capacity. |
| Register mask = scratch2; |
| __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset)); |
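// The capacity is a smi, so shifting right by one untags it; since the
// capacity is a power of two, capacity - 1 is the probe mask.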
| __ mov(mask, Operand(mask, ASR, 1)); |
| __ sub(mask, mask, Operand(1)); |
| |
| // Calculate untagged address of the first element of the string table. |
| Register first_string_table_element = string_table; |
| __ add(first_string_table_element, string_table, |
| Operand(StringTable::kElementsStartOffset - kHeapObjectTag)); |
| |
| // Registers |
| // chars: two character string, char 1 in byte 0 and char 2 in byte 1. |
| // hash: hash of two character string |
| // mask: capacity mask |
| // first_string_table_element: address of the first element of |
| // the string table |
| // undefined: the undefined object |
| // scratch: - |
| |
| // Perform a number of probes in the string table. |
| const int kProbes = 4; |
| Label found_in_string_table; |
| Label next_probe[kProbes]; |
| Register candidate = scratch5; // Scratch register contains candidate. |
| for (int i = 0; i < kProbes; i++) { |
| // Calculate entry in string table. |
| if (i > 0) { |
| __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i))); |
| } else { |
| __ mov(candidate, hash); |
| } |
| |
| __ and_(candidate, candidate, Operand(mask)); |
| |
// Load the entry from the string table.
| STATIC_ASSERT(StringTable::kEntrySize == 1); |
| __ ldr(candidate, |
| MemOperand(first_string_table_element, |
| candidate, |
| LSL, |
| kPointerSizeLog2)); |
| |
| // If entry is undefined no string with this hash can be found. |
| Label is_string; |
| __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE); |
| __ b(ne, &is_string); |
| |
| __ cmp(undefined, candidate); |
| __ b(eq, not_found); |
| // Must be the hole (deleted entry). |
| if (FLAG_debug_code) { |
| __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| __ cmp(ip, candidate); |
| __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole); |
| } |
| __ jmp(&next_probe[i]); |
| |
| __ bind(&is_string); |
| |
| // Check that the candidate is a non-external ASCII string. The instance |
| // type is still in the scratch register from the CompareObjectType |
| // operation. |
| __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]); |
| |
| // If length is not 2 the string is not a candidate. |
| __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); |
| __ cmp(scratch, Operand(Smi::FromInt(2))); |
| __ b(ne, &next_probe[i]); |
| |
| // Check if the two characters match. |
| // Assumes that word load is little endian. |
| __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize)); |
| __ cmp(chars, scratch); |
| __ b(eq, &found_in_string_table); |
| __ bind(&next_probe[i]); |
| } |
| |
| // No matching 2 character string found by probing. |
| __ jmp(not_found); |
| |
| // Scratch register contains result when we fall through to here. |
| Register result = candidate; |
| __ bind(&found_in_string_table); |
| __ Move(r0, result); |
| } |
| |
| |
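// The three helpers below compute the running string hash used when
// probing the string table. Roughly, in C (with seed being the untagged
// heap hash seed):
//
//   hash = seed + c0;   hash += hash << 10;  hash ^= hash >> 6;   // Init
//   hash += c;          hash += hash << 10;  hash ^= hash >> 6;   // AddCharacter
//   hash += hash << 3;  hash ^= hash >> 11;  hash += hash << 15;  // GetHash
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;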
| void StringHelper::GenerateHashInit(MacroAssembler* masm, |
| Register hash, |
| Register character) { |
// hash = seed + character;
| __ LoadRoot(hash, Heap::kHashSeedRootIndex); |
| // Untag smi seed and add the character. |
| __ add(hash, character, Operand(hash, LSR, kSmiTagSize)); |
| // hash += hash << 10; |
| __ add(hash, hash, Operand(hash, LSL, 10)); |
| // hash ^= hash >> 6; |
| __ eor(hash, hash, Operand(hash, LSR, 6)); |
| } |
| |
| |
| void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, |
| Register hash, |
| Register character) { |
| // hash += character; |
| __ add(hash, hash, Operand(character)); |
| // hash += hash << 10; |
| __ add(hash, hash, Operand(hash, LSL, 10)); |
| // hash ^= hash >> 6; |
| __ eor(hash, hash, Operand(hash, LSR, 6)); |
| } |
| |
| |
| void StringHelper::GenerateHashGetHash(MacroAssembler* masm, |
| Register hash) { |
| // hash += hash << 3; |
| __ add(hash, hash, Operand(hash, LSL, 3)); |
| // hash ^= hash >> 11; |
| __ eor(hash, hash, Operand(hash, LSR, 11)); |
| // hash += hash << 15; |
| __ add(hash, hash, Operand(hash, LSL, 15)); |
| |
| __ and_(hash, hash, Operand(String::kHashBitMask), SetCC); |
| |
| // if (hash == 0) hash = 27; |
| __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq); |
| } |
| |
| |
| void SubStringStub::Generate(MacroAssembler* masm) { |
| Label runtime; |
| |
| // Stack frame on entry. |
| // lr: return address |
| // sp[0]: to |
| // sp[4]: from |
| // sp[8]: string |
| |
| // This stub is called from the native-call %_SubString(...), so |
| // nothing can be assumed about the arguments. It is tested that: |
| // "string" is a sequential string, |
| // both "from" and "to" are smis, and |
| // 0 <= from <= to <= string.length. |
| // If any of these assumptions fail, we call the runtime system. |
| |
| const int kToOffset = 0 * kPointerSize; |
| const int kFromOffset = 1 * kPointerSize; |
| const int kStringOffset = 2 * kPointerSize; |
| |
| __ Ldrd(r2, r3, MemOperand(sp, kToOffset)); |
| STATIC_ASSERT(kFromOffset == kToOffset + 4); |
| STATIC_ASSERT(kSmiTag == 0); |
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
| |
| // Arithmetic shift right by one un-smi-tags. In this case we rotate right |
| // instead because we bail out on non-smi values: ROR and ASR are equivalent |
| // for smis but they set the flags in a way that's easier to optimize. |
| __ mov(r2, Operand(r2, ROR, 1), SetCC); |
| __ mov(r3, Operand(r3, ROR, 1), SetCC, cc); |
| // If either to or from had the smi tag bit set, then C is set now, and N |
| // has the same value: we rotated by 1, so the bottom bit is now the top bit. |
| // We want to bailout to runtime here if From is negative. In that case, the |
| // next instruction is not executed and we fall through to bailing out to |
| // runtime. |
| // Executed if both r2 and r3 are untagged integers. |
| __ sub(r2, r2, Operand(r3), SetCC, cc); |
| // One of the above un-smis or the above SUB could have set N==1. |
| __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to. |
| |
| // Make sure first argument is a string. |
| __ ldr(r0, MemOperand(sp, kStringOffset)); |
| // Do a JumpIfSmi, but fold its jump into the subsequent string test. |
| __ SmiTst(r0); |
| Condition is_string = masm->IsObjectStringType(r0, r1, ne); |
| ASSERT(is_string == eq); |
| __ b(NegateCondition(is_string), &runtime); |
| |
| Label single_char; |
| __ cmp(r2, Operand(1)); |
| __ b(eq, &single_char); |
| |
| // Short-cut for the case of trivial substring. |
| Label return_r0; |
| // r0: original string |
| // r2: result string length |
| __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset)); |
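// r4 holds the smi-tagged length of the original string; ASR by one untags
// it for the comparison with the untagged result length in r2.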
| __ cmp(r2, Operand(r4, ASR, 1)); |
| // Return original string. |
| __ b(eq, &return_r0); |
| // Longer than original string's length or negative: unsafe arguments. |
| __ b(hi, &runtime); |
| // Shorter than original string's length: an actual substring. |
| |
| // Deal with different string types: update the index if necessary |
| // and put the underlying string into r5. |
| // r0: original string |
| // r1: instance type |
| // r2: length |
| // r3: from index (untagged) |
| Label underlying_unpacked, sliced_string, seq_or_external_string; |
| // If the string is not indirect, it can only be sequential or external. |
| STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); |
| STATIC_ASSERT(kIsIndirectStringMask != 0); |
| __ tst(r1, Operand(kIsIndirectStringMask)); |
| __ b(eq, &seq_or_external_string); |
| |
| __ tst(r1, Operand(kSlicedNotConsMask)); |
| __ b(ne, &sliced_string); |
| // Cons string. Check whether it is flat, then fetch first part. |
| __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset)); |
| __ CompareRoot(r5, Heap::kempty_stringRootIndex); |
| __ b(ne, &runtime); |
| __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); |
| // Update instance type. |
| __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); |
| __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); |
| __ jmp(&underlying_unpacked); |
| |
| __ bind(&sliced_string); |
| // Sliced string. Fetch parent and correct start index by offset. |
| __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); |
| __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset)); |
| __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index. |
| // Update instance type. |
| __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); |
| __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); |
| __ jmp(&underlying_unpacked); |
| |
| __ bind(&seq_or_external_string); |
| // Sequential or external string. Just move string to the expected register. |
| __ mov(r5, r0); |
| |
| __ bind(&underlying_unpacked); |
| |
| if (FLAG_string_slices) { |
| Label copy_routine; |
| // r5: underlying subject string |
| // r1: instance type of underlying subject string |
| // r2: length |
| // r3: adjusted start index (untagged) |
| __ cmp(r2, Operand(SlicedString::kMinLength)); |
| // Short slice. Copy instead of slicing. |
| __ b(lt, ©_routine); |
| // Allocate new sliced string. At this point we do not reload the instance |
| // type including the string encoding because we simply rely on the info |
| // provided by the original string. It does not matter if the original |
| // string's encoding is wrong because we always have to recheck encoding of |
| // the newly created string's parent anyways due to externalized strings. |
| Label two_byte_slice, set_slice_header; |
| STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); |
| STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); |
| __ tst(r1, Operand(kStringEncodingMask)); |
| __ b(eq, &two_byte_slice); |
| __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); |
| __ jmp(&set_slice_header); |
| __ bind(&two_byte_slice); |
| __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); |
| __ bind(&set_slice_header); |
| __ mov(r3, Operand(r3, LSL, 1)); |
| __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); |
| __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); |
| __ jmp(&return_r0); |
| |
| __ bind(©_routine); |
| } |
| |
| // r5: underlying subject string |
| // r1: instance type of underlying subject string |
| // r2: length |
| // r3: adjusted start index (untagged) |
| Label two_byte_sequential, sequential_string, allocate_result; |
| STATIC_ASSERT(kExternalStringTag != 0); |
| STATIC_ASSERT(kSeqStringTag == 0); |
| __ tst(r1, Operand(kExternalStringTag)); |
| __ b(eq, &sequential_string); |
| |
| // Handle external string. |
| // Rule out short external strings. |
| STATIC_CHECK(kShortExternalStringTag != 0); |
| __ tst(r1, Operand(kShortExternalStringTag)); |
| __ b(ne, &runtime); |
| __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset)); |
| // r5 already points to the first character of underlying string. |
| __ jmp(&allocate_result); |
| |
| __ bind(&sequential_string); |
| // Locate first character of underlying subject string. |
| STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); |
| __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
| |
| __ bind(&allocate_result); |
// Sequential ASCII string. Allocate the result.
| STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); |
| __ tst(r1, Operand(kStringEncodingMask)); |
| __ b(eq, &two_byte_sequential); |
| |
| // Allocate and copy the resulting ASCII string. |
| __ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime); |
| |
| // Locate first character of substring to copy. |
| __ add(r5, r5, r3); |
| // Locate first character of result. |
| __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
| |
| // r0: result string |
| // r1: first character of result string |
| // r2: result string length |
| // r5: first character of substring to copy |
| STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
| StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, |
| COPY_ASCII | DEST_ALWAYS_ALIGNED); |
| __ jmp(&return_r0); |
| |
| // Allocate and copy the resulting two-byte string. |
| __ bind(&two_byte_sequential); |
| __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); |
| |
| // Locate first character of substring to copy. |
| STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
| __ add(r5, r5, Operand(r3, LSL, 1)); |
| // Locate first character of result. |
| __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| |
| // r0: result string. |
| // r1: first character of result. |
| // r2: result length. |
| // r5: first character of substring to copy. |
| STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
| StringHelper::GenerateCopyCharactersLong( |
| masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); |
| |
| __ bind(&return_r0); |
| Counters* counters = masm->isolate()->counters(); |
| __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); |
| __ Drop(3); |
| __ Ret(); |
| |
| // Just jump to runtime to create the sub string. |
| __ bind(&runtime); |
| __ TailCallRuntime(Runtime::kSubString, 3, 1); |
| |
| __ bind(&single_char); |
| // r0: original string |
| // r1: instance type |
| // r2: length |
| // r3: from index (untagged) |
| __ SmiTag(r3, r3); |
| StringCharAtGenerator generator( |
| r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); |
| generator.GenerateFast(masm); |
| __ Drop(3); |
| __ Ret(); |
| generator.SkipSlow(masm, &runtime); |
| } |
| |
| |
| void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, |
| Register left, |
| Register right, |
| Register scratch1, |
| Register scratch2, |
| Register scratch3) { |
| Register length = scratch1; |
| |
| // Compare lengths. |
| Label strings_not_equal, check_zero_length; |
| __ ldr(length, FieldMemOperand(left, String::kLengthOffset)); |
| __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| __ cmp(length, scratch2); |
| __ b(eq, &check_zero_length); |
| __ bind(&strings_not_equal); |
| __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL))); |
| __ Ret(); |
| |
| // Check if the length is zero. |
| Label compare_chars; |
| __ bind(&check_zero_length); |
| STATIC_ASSERT(kSmiTag == 0); |
| __ cmp(length, Operand::Zero()); |
| __ b(ne, &compare_chars); |
| __ mov(r0, Operand(Smi::FromInt(EQUAL))); |
| __ Ret(); |
| |
| // Compare characters. |
| __ bind(&compare_chars); |
| GenerateAsciiCharsCompareLoop(masm, |
| left, right, length, scratch2, scratch3, |
| &strings_not_equal); |
| |
| // Characters are equal. |
| __ mov(r0, Operand(Smi::FromInt(EQUAL))); |
| __ Ret(); |
| } |
| |
| |
| void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
| Register left, |
| Register right, |
| Register scratch1, |
| Register scratch2, |
| Register scratch3, |
| Register scratch4) { |
| Label result_not_equal, compare_lengths; |
| // Find minimum length and length difference. |
| __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); |
| __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| __ sub(scratch3, scratch1, Operand(scratch2), SetCC); |
| Register length_delta = scratch3; |
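// If the left string is longer (gt), use the right string's length as the
// minimum length.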
| __ mov(scratch1, scratch2, LeaveCC, gt); |
| Register min_length = scratch1; |
| STATIC_ASSERT(kSmiTag == 0); |
| __ cmp(min_length, Operand::Zero()); |
| __ b(eq, &compare_lengths); |
| |
| // Compare loop. |
| GenerateAsciiCharsCompareLoop(masm, |
| left, right, min_length, scratch2, scratch4, |
| &result_not_equal); |
| |
| // Compare lengths - strings up to min-length are equal. |
| __ bind(&compare_lengths); |
| ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); |
| // Use length_delta as result if it's zero. |
| __ mov(r0, Operand(length_delta), SetCC); |
| __ bind(&result_not_equal); |
| // Conditionally update the result based either on length_delta or |
// the last comparison performed in the loop above.
| __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt); |
| __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt); |
| __ Ret(); |
| } |
| |
| |
| void StringCompareStub::GenerateAsciiCharsCompareLoop( |
| MacroAssembler* masm, |
| Register left, |
| Register right, |
| Register length, |
| Register scratch1, |
| Register scratch2, |
| Label* chars_not_equal) { |
| // Change index to run from -length to -1 by adding length to string |
| // start. This means that loop ends when index reaches zero, which |
| // doesn't need an additional compare. |
| __ SmiUntag(length); |
| __ add(scratch1, length, |
| Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
| __ add(left, left, Operand(scratch1)); |
| __ add(right, right, Operand(scratch1)); |
| __ rsb(length, length, Operand::Zero()); |
| Register index = length; // index = -length; |
| |
| // Compare loop. |
| Label loop; |
| __ bind(&loop); |
| __ ldrb(scratch1, MemOperand(left, index)); |
| __ ldrb(scratch2, MemOperand(right, index)); |
| __ cmp(scratch1, scratch2); |
| __ b(ne, chars_not_equal); |
| __ add(index, index, Operand(1), SetCC); |
| __ b(ne, &loop); |
| } |
| |
| |
| void StringCompareStub::Generate(MacroAssembler* masm) { |
| Label runtime; |
| |
| Counters* counters = masm->isolate()->counters(); |
| |
| // Stack frame on entry. |
| // sp[0]: right string |
| // sp[4]: left string |
| __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1. |
| |
| Label not_same; |
| __ cmp(r0, r1); |
| __ b(ne, ¬_same); |
| STATIC_ASSERT(EQUAL == 0); |
| STATIC_ASSERT(kSmiTag == 0); |
| __ mov(r0, Operand(Smi::FromInt(EQUAL))); |
| __ IncrementCounter(counters->string_compare_native(), 1, r1, r2); |
| __ add(sp, sp, Operand(2 * kPointerSize)); |
| __ Ret(); |
| |
| __ bind(¬_same); |
| |
| // Check that both objects are sequential ASCII strings. |
| __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime); |
| |
| // Compare flat ASCII strings natively. Remove arguments from stack first. |
| __ IncrementCounter(counters->string_compare_native(), 1, r2, r3); |
| __ add(sp, sp, Operand(2 * kPointerSize)); |
| GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5); |
| |
| // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) |
| // tagged as a small integer. |
| __ bind(&runtime); |
| __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| } |
| |
| |
| void StringAddStub::Generate(MacroAssembler* masm) { |
| Label call_runtime, call_builtin; |
| Builtins::JavaScript builtin_id = Builtins::ADD; |
| |
| Counters* counters = masm->isolate()->counters(); |
| |
| // Stack on entry: |
| // sp[0]: second argument (right). |
| // sp[4]: first argument (left). |
| |
| // Load the two arguments. |
| __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument. |
| __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument. |
| |
| // Make sure that both arguments are strings if not known in advance. |
| // Otherwise, at least one of the arguments is definitely a string, |
| // and we convert the one that is not known to be a string. |
| if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) { |
| ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT); |
| ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT); |
| __ JumpIfEitherSmi(r0, r1, &call_runtime); |
| // Load instance types. |
| __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
| __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
| __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
| STATIC_ASSERT(kStringTag == 0); |
| // If either is not a string, go to runtime. |
| __ tst(r4, Operand(kIsNotStringMask)); |
| __ tst(r5, Operand(kIsNotStringMask), eq); |
| __ b(ne, &call_runtime); |
| } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) { |
| ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0); |
| GenerateConvertArgument( |
| masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin); |
| builtin_id = Builtins::STRING_ADD_RIGHT; |
| } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) { |
| ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0); |
| GenerateConvertArgument( |
| masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin); |
| builtin_id = Builtins::STRING_ADD_LEFT; |
| } |
| |
| // Both arguments are strings. |
| // r0: first string |
| // r1: second string |
| // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| { |
| Label strings_not_empty; |
| // Check if either of the strings are empty. In that case return the other. |
| __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); |
| __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); |
| STATIC_ASSERT(kSmiTag == 0); |
| __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. |
| __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. |
| STATIC_ASSERT(kSmiTag == 0); |
| // Else test if second string is empty. |
| __ cmp(r3, Operand(Smi::FromInt(0)), ne); |
| __ b(ne, &strings_not_empty); // If either string was empty, return r0. |
| |
| __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
| __ add(sp, sp, Operand(2 * kPointerSize)); |
| __ Ret(); |
| |
| __ bind(&strings_not_empty); |
| } |
| |
| __ SmiUntag(r2); |
| __ SmiUntag(r3); |
| // Both strings are non-empty. |
| // r0: first string |
| // r1: second string |
| // r2: length of first string |
| // r3: length of second string |
| // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| // Look at the length of the result of adding the two strings. |
| Label string_add_flat_result, longer_than_two; |
| // Adding two lengths can't overflow. |
| STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); |
| __ add(r6, r2, Operand(r3)); |
| // Use the string table when adding two one character strings, as it |
| // helps later optimizations to return a string here. |
| __ cmp(r6, Operand(2)); |
| __ b(ne, &longer_than_two); |
| |
| // Check that both strings are non-external ASCII strings. |
| if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
| __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
| __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
| __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
| } |
| __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, |
| &call_runtime); |
| |
| // Get the two characters forming the sub string. |
| __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); |
| __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); |
| |
// Try to look up the two character string in the string table. If it is
// not found just allocate a new one.
| Label make_two_character_string; |
| StringHelper::GenerateTwoCharacterStringTableProbe( |
| masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); |
| __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
| __ add(sp, sp, Operand(2 * kPointerSize)); |
| __ Ret(); |
| |
| __ bind(&make_two_character_string); |
// The resulting string has length 2, and the first characters of the two
// strings are combined into a single halfword in the r2 register.
// So we can fill the resulting string without two loops, using a single
// halfword store instruction (which assumes that the processor is in
// little-endian mode).
| __ mov(r6, Operand(2)); |
| __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); |
| __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); |
| __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
| __ add(sp, sp, Operand(2 * kPointerSize)); |
| __ Ret(); |
| |
| __ bind(&longer_than_two); |
| // Check if resulting string will be flat. |
| __ cmp(r6, Operand(ConsString::kMinLength)); |
| __ b(lt, &string_add_flat_result); |
| // Handle exceptionally long strings in the runtime system. |
| STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); |
| ASSERT(IsPowerOf2(String::kMaxLength + 1)); |
// kMaxLength + 1 is representable as a shifted literal, kMaxLength is not.
| __ cmp(r6, Operand(String::kMaxLength + 1)); |
| __ b(hs, &call_runtime); |
| |
| // If result is not supposed to be flat, allocate a cons string object. |
| // If both strings are ASCII the result is an ASCII cons string. |
| if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
| __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
| __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
| __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
| } |
| Label non_ascii, allocated, ascii_data; |
| STATIC_ASSERT(kTwoByteStringTag == 0); |
| __ tst(r4, Operand(kStringEncodingMask)); |
| __ tst(r5, Operand(kStringEncodingMask), ne); |
| __ b(eq, &non_ascii); |
| |
| // Allocate an ASCII cons string. |
| __ bind(&ascii_data); |
| __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); |
| __ bind(&allocated); |
| // Fill the fields of the cons string. |
| Label skip_write_barrier, after_writing; |
| ExternalReference high_promotion_mode = ExternalReference:: |
| new_space_high_promotion_mode_active_address(masm->isolate()); |
| __ mov(r4, Operand(high_promotion_mode)); |
| __ ldr(r4, MemOperand(r4, 0)); |
| __ cmp(r4, Operand::Zero()); |
| __ b(eq, &skip_write_barrier); |
| |
| __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); |
| __ RecordWriteField(r7, |
| ConsString::kFirstOffset, |
| r0, |
| r4, |
| kLRHasNotBeenSaved, |
| kDontSaveFPRegs); |
| __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); |
| __ RecordWriteField(r7, |
| ConsString::kSecondOffset, |
| r1, |
| r4, |
| kLRHasNotBeenSaved, |
| kDontSaveFPRegs); |
| __ jmp(&after_writing); |
| |
| __ bind(&skip_write_barrier); |
| __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); |
| __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); |
| |
| __ bind(&after_writing); |
| |
| __ mov(r0, Operand(r7)); |
| __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
| __ add(sp, sp, Operand(2 * kPointerSize)); |
| __ Ret(); |
| |
| __ bind(&non_ascii); |
// At least one of the strings is two-byte. Check whether it happens
// to contain only one-byte characters.
| // r4: first instance type. |
| // r5: second instance type. |
| __ tst(r4, Operand(kOneByteDataHintMask)); |
| __ tst(r5, Operand(kOneByteDataHintMask), ne); |
| __ b(ne, &ascii_data); |
| __ eor(r4, r4, Operand(r5)); |
| STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); |
| __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); |
| __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag)); |
| __ b(eq, &ascii_data); |
| |
| // Allocate a two byte cons string. |
| __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); |
| __ jmp(&allocated); |
| |
| // We cannot encounter sliced strings or cons strings here since: |
| STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); |
| // Handle creating a flat result from either external or sequential strings. |
| // Locate the first characters' locations. |
| // r0: first string |
| // r1: second string |
| // r2: length of first string |
| // r3: length of second string |
| // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
| // r6: sum of lengths. |
| Label first_prepared, second_prepared; |
| __ bind(&string_add_flat_result); |
| if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) { |
| __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
| __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
| __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
| } |
| |
// Check whether both strings have the same encoding.
| __ eor(r7, r4, Operand(r5)); |
| __ tst(r7, Operand(kStringEncodingMask)); |
| __ b(ne, &call_runtime); |
| |
| STATIC_ASSERT(kSeqStringTag == 0); |
| __ tst(r4, Operand(kStringRepresentationMask)); |
| STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
| __ add(r7, |
| r0, |
| Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), |
| LeaveCC, |
| eq); |
| __ b(eq, &first_prepared); |
| // External string: rule out short external string and load string resource. |
| STATIC_ASSERT(kShortExternalStringTag != 0); |
| __ tst(r4, Operand(kShortExternalStringMask)); |
| __ b(ne, &call_runtime); |
| __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); |
| __ bind(&first_prepared); |
| |
| STATIC_ASSERT(kSeqStringTag == 0); |
| __ tst(r5, Operand(kStringRepresentationMask)); |
| STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
| __ add(r1, |
| r1, |
| Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), |
| LeaveCC, |
| eq); |
| __ b(eq, &second_prepared); |
| // External string: rule out short external string and load string resource. |
| STATIC_ASSERT(kShortExternalStringTag != 0); |
| __ tst(r5, Operand(kShortExternalStringMask)); |
| __ b(ne, &call_runtime); |
| __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); |
| __ bind(&second_prepared); |
| |
| Label non_ascii_string_add_flat_result; |
| // r7: first character of first string |
| // r1: first character of second string |
| // r2: length of first string. |
| // r3: length of second string. |
| // r6: sum of lengths. |
| // Both strings have the same encoding. |
| STATIC_ASSERT(kTwoByteStringTag == 0); |
| __ tst(r5, Operand(kStringEncodingMask)); |
| __ b(eq, &non_ascii_string_add_flat_result); |
| |
| __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); |
| __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
| // r0: result string. |
| // r7: first character of first string. |
| // r1: first character of second string. |
| // r2: length of first string. |
| // r3: length of second string. |
| // r6: first character of result. |
| StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, true); |
| // r6: next character of result. |
| StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); |
| __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
| __ add(sp, sp, Operand(2 * kPointerSize)); |
| __ Ret(); |
| |
| __ bind(&non_ascii_string_add_flat_result); |
| __ AllocateTwoByteString(r0, r6, r4, r5, r9, &call_runtime); |
| __ add(r6, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
| // r0: result string. |
| // r7: first character of first string. |
| // r1: first character of second string. |
| // r2: length of first string. |
| // r3: length of second string. |
| // r6: first character of result. |
| StringHelper::GenerateCopyCharacters(masm, r6, r7, r2, r4, false); |
| // r6: next character of result. |
| StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); |
| __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
| __ add(sp, sp, Operand(2 * kPointerSize)); |
| __ Ret(); |
| |
| // Just jump to runtime to add the two strings. |
| __ bind(&call_runtime); |
| if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { |
| GenerateRegisterArgsPop(masm); |
| // Build a frame |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| GenerateRegisterArgsPush(masm); |
| __ CallRuntime(Runtime::kStringAdd, 2); |
| } |
| __ Ret(); |
| } else { |
| __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
| } |
| |
| if (call_builtin.is_linked()) { |
| __ bind(&call_builtin); |
| if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) { |
| GenerateRegisterArgsPop(masm); |
| // Build a frame |
| { |
| FrameScope scope(masm, StackFrame::INTERNAL); |
| GenerateRegisterArgsPush(masm); |
| __ InvokeBuiltin(builtin_id, CALL_FUNCTION); |
| } |
| __ Ret(); |
| } else { |
| __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); |
| } |
| } |
| } |
| |
| |
| void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| __ push(r0); |
| __ push(r1); |
| } |
| |
| |
| void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) { |
| __ pop(r1); |
| __ pop(r0); |
| } |
| |
| |
| void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, |
| int stack_offset, |
| Register arg, |
| Register scratch1, |
| Register scratch2, |
| Register scratch3, |
| Register scratch4, |
| Label* slow) { |
| // First check if the argument is already a string. |
| Label not_string, done; |
| __ JumpIfSmi(arg, ¬_string); |
| __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE); |
| __ b(lt, &done); |
| |
| // Check the number to string cache. |
| Label not_cached; |
| __ bind(¬_string); |
| // Puts the cached result into scratch1. |
| NumberToStringStub::GenerateLookupNumberStringCache(masm, |
| arg, |
| scratch1, |
| scratch2, |
| scratch3, |
| scratch4, |
| ¬_cached); |
| __ mov(arg, scratch1); |
| __ str(arg, MemOperand(sp, stack_offset)); |
| __ jmp(&done); |
| |
| // Check if the argument is a safe string wrapper. |
| __ bind(¬_cached); |
| __ JumpIfSmi(arg, slow); |
| __ CompareObjectType( |
| arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1. |
| __ b(ne, slow); |
| __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); |
| __ and_(scratch2, |
| scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); |
| __ cmp(scratch2, |
| Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); |
| __ b(ne, slow); |
| __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); |
| __ str(arg, MemOperand(sp, stack_offset)); |
| |
| __ bind(&done); |
| } |
| |
| |
| void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| ASSERT(state_ == CompareIC::SMI); |
| Label miss; |
| __ orr(r2, r1, r0); |
| __ JumpIfNotSmi(r2, &miss); |
| |
| if (GetCondition() == eq) { |
| // For equality we do not care about the sign of the result. |
| __ sub(r0, r0, r1, SetCC); |
| } else { |
| // Untag before subtracting to avoid handling overflow. |
| __ SmiUntag(r1); |
| __ sub(r0, r1, Operand::SmiUntag(r0)); |
| } |
| __ Ret(); |
| |
| __ bind(&miss); |
| GenerateMiss(masm); |
| } |
| |
| |
| void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
| ASSERT(state_ == CompareIC::NUMBER); |
| |
| Label generic_stub; |
| Label unordered, maybe_undefined1, maybe_undefined2; |
| Label miss; |
| |
| if (left_ == CompareIC::SMI) { |
| __ JumpIfNotSmi(r1, &miss); |
| } |
| if (right_ == CompareIC::SMI) { |
| __ JumpIfNotSmi(r0, &miss); |
| } |
| |
// Inline the double comparison and fall back to the general compare
// stub if NaN is involved.
| // Load left and right operand. |
| Label done, left, left_smi, right_smi; |
| __ JumpIfSmi(r0, &right_smi); |
| __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
| DONT_DO_SMI_CHECK); |
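// vldr needs an untagged base address, so strip the heap object tag before
// loading the heap number's double value.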
| __ sub(r2, r0, Operand(kHeapObjectTag)); |
| __ vldr(d1, r2, HeapNumber::kValueOffset); |
| __ b(&left); |
| __ bind(&right_smi); |
| __ SmiToDouble(d1, r0); |
| |
| __ bind(&left); |
| __ JumpIfSmi(r1, &left_smi); |
| __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
| DONT_DO_SMI_CHECK); |
| __ sub(r2, r1, Operand(kHeapObjectTag)); |
| __ vldr(d0, r2, HeapNumber::kValueOffset); |
| __ b(&done); |
| __ bind(&left_smi); |
| __ SmiToDouble(d0, r1); |
| |
| __ bind(&done); |
| // Compare operands. |
| __ VFPCompareAndSetFlags(d0, d1); |
| |
| // Don't base result on status bits when a NaN is involved. |
| __ b(vs, &unordered); |
| |
| // Return a result of -1, 0, or 1, based on status bits. |
| __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
| __ mov(r0, Operand(LESS), LeaveCC, lt); |
| __ mov(r0, Operand(GREATER), LeaveCC, gt); |
| __ Ret(); |
| |
| __ bind(&unordered); |
| __ bind(&generic_stub); |
| ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, |
| CompareIC::GENERIC); |
| __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| |
| __ bind(&maybe_undefined1); |
| if (Token::IsOrderedRelationalCompareOp(op_)) { |
| __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); |
| __ b(ne, &miss); |
| __ JumpIfSmi(r1, &unordered); |
| __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); |
| __ b(ne, &maybe_undefined2); |
| __ jmp(&unordered); |
| } |
| |
| __ bind(&maybe_undefined2); |
| if (Token::IsOrderedRelationalCompareOp(op_)) { |
| __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); |
| __ b(eq, &unordered); |
| } |
| |
| __ bind(&miss); |
| GenerateMiss(masm); |
| } |
| |
| |
| void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { |
| ASSERT(state_ == CompareIC::INTERNALIZED_STRING); |
| Label miss; |
| |
| // Registers containing left and right operands respectively. |
| Register left = r1; |
| Register right = r0; |
| Register tmp1 = r2; |
| Register tmp2 = r3; |
| |
| // Check that both operands are heap objects. |
| __ JumpIfEitherSmi(left, right, &miss); |
| |
| // Check that both operands are internalized strings. |
| __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
| __ orr(tmp1, tmp1, Operand(tmp2)); |
| __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
| __ b(ne, &miss); |
| |
| // Internalized strings are compared by identity. |
| __ cmp(left, right); |
| // Make sure r0 is non-zero. At this point input operands are |
| // guaranteed to be non-zero. |
| ASSERT(right.is(r0)); |
| STATIC_ASSERT(EQUAL == 0); |
| STATIC_ASSERT(kSmiTag == 0); |
| __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); |
| __ Ret(); |
| |
| __ bind(&miss); |
| GenerateMiss(masm); |
| } |
| |
| |
| void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { |
| ASSERT(state_ == CompareIC::UNIQUE_NAME); |
| ASSERT(GetCondition() == eq); |
| Label miss; |
| |
| // Registers containing left and right operands respectively. |
| Register left = r1; |
| Register right = r0; |
| Register tmp1 = r2; |
| Register tmp2 = r3; |
| |
| // Check that both operands are heap objects. |
| __ JumpIfEitherSmi(left, right, &miss); |
| |
| // Check that both operands are unique names. This leaves the instance |
| // types loaded in tmp1 and tmp2. |
| __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| |
| __ JumpIfNotUniqueName(tmp1, &miss); |
| __ JumpIfNotUniqueName(tmp2, &miss); |
| |
| // Unique names are compared by identity. |
| __ cmp(left, right); |
| // Make sure r0 is non-zero. At this point input operands are |
| // guaranteed to be non-zero. |
| ASSERT(right.is(r0)); |
| STATIC_ASSERT(EQUAL == 0); |
| STATIC_ASSERT(kSmiTag == 0); |
| __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); |
| __ Ret(); |
| |
| __ bind(&miss); |
| GenerateMiss(masm); |
| } |
| |
| |
| void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
| ASSERT(state_ == CompareIC::STRING); |
| Label miss; |
| |
| bool equality = Token::IsEqualityOp(op_); |
| |
| // Registers containing left and right operands respectively. |
| Register left = r1; |
| Register right = r0; |
| Register tmp1 = r2; |
| Register tmp2 = r3; |
| Register tmp3 = r4; |
| Register tmp4 = r5; |
| |
| // Check that both operands are heap objects. |
| __ JumpIfEitherSmi(left, right, &miss); |
| |
| // Check that both operands are strings. This leaves the instance |
| // types loaded in tmp1 and tmp2. |
| __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
| __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
| __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
| __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
| STATIC_ASSERT(kNotStringTag != 0); |
| __ orr(tmp3, tmp1, tmp2); |
| __ tst(tmp3, Operand(kIsNotStringMask)); |
| __ b(ne, &miss); |
| |
| // Fast check for identical strings. |
| __ cmp(left, right); |
| STATIC_ASSERT(EQUAL == 0); |
| STATIC_ASSERT(kSmiTag == 0); |
| __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); |
| __ Ret(eq); |
| |
| // Handle not identical strings. |
| |
| // Check that both strings are internalized strings. If they are, we're done |
| // because we already know they are not identical. We know they are both |
| // strings. |
| if (equality) { |
| ASSERT(GetCondition() == eq); |
| STATIC_ASSERT(kInternalizedTag == 0); |
| __ orr(tmp3, tmp1, Operand(tmp2)); |
| __ tst(tmp3, Operand(kIsNotInternalizedMask)); |
| // Make sure r0 is non-zero. At this point input operands are |
| // guaranteed to be non-zero. |
| ASSERT(right.is(r0)); |
| __ Ret(eq); |
| } |
| |
| // Check that both strings are sequential ASCII. |
| Label runtime; |
| __ JumpIfBothInstanceTypesAreNotSequentialAscii( |
| tmp1, tmp2, tmp3, tmp4, &runtime); |
| |
| // Compare flat ASCII strings. Returns when done. |
| if (equality) { |
| StringCompareStub::GenerateFlatAsciiStringEquals( |
| masm, left, right, tmp1, tmp2, tmp3); |
| } else { |
| StringCompareStub::GenerateCompareFlatAsciiStrings( |
| masm, left, right, tmp1, tmp2, tmp3, tmp4); |
| } |
| |
| // Handle more complex cases in runtime. |
| __ bind(&runtime); |
| __ Push(left, right); |
| if (equality) { |
| __ TailCallRuntime(Runtime::kStringEquals, 2, 1); |
| } else { |
| __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| } |
| |
| __ bind(&miss); |
| GenerateMiss(masm); |
| } |
| |
| |
| void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
| ASSERT(state_ == CompareIC::OBJECT); |
| Label miss; |
| __ and_(r2, r1, Operand(r0)); |
| __ JumpIfSmi(r2, &miss); |
| |
| __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); |
| __ b(ne, &miss); |
| __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); |
| __ b(ne, &miss); |
| |
| ASSERT(GetCondition() == eq); |
| __ sub(r0, r0, Operand(r1)); |
| __ Ret(); |
| |
| __ bind(&miss); |
| GenerateMiss(masm); |
| } |
| |
| |
| void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { |
| Label miss; |
| __ and_(r2, r1, Operand(r0)); |
| __ JumpIfSmi(r2, &miss); |
| __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); |
| __ cmp(r2, Operand(known_map_)); |
| __ b(ne, &miss); |
| __ cmp(r3, Operand(known_map_)); |
| __ b(ne, &miss); |
| |
| __ sub(r0, r0, Operand(r1)); |
| __ Ret(); |
| |
| __ bind(&miss); |
| GenerateMiss(masm); |
| } |
| |
| |
| |
| void ICCompareStub::GenerateMiss(MacroAssembler* masm) { |
| { |
| // Call the runtime system in a fresh internal frame. |
| ExternalReference miss = |
| ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); |
| |
| FrameScope scope(masm, StackFrame::INTERNAL); |
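// The first Push preserves the operands across the call; lr is saved next;
// the second Push together with the smi-tagged op are the three arguments
// for the miss handler.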
| __ Push(r1, r0); |
| __ push(lr); |
| __ Push(r1, r0); |
| __ mov(ip, Operand(Smi::FromInt(op_))); |
| __ push(ip); |
| __ CallExternalReference(miss, 3); |
| // Compute the entry point of the rewritten stub. |
| __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| // Restore registers. |
| __ pop(lr); |
| __ pop(r0); |
| __ pop(r1); |
| } |
| |
| __ Jump(r2); |
| } |
| |
| |
| void DirectCEntryStub::Generate(MacroAssembler* masm) { |
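// The C++ function called via GenerateCall returns here (lr was pointed at
// this stub). Reload the real return address, which GenerateCall stored at
// sp[0], directly into pc to return to the calling code.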
| __ ldr(pc, MemOperand(sp, 0)); |
| } |
| |
| |
| void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
| Register target) { |
| intptr_t code = |
| reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); |
| __ mov(lr, Operand(code, RelocInfo::CODE_TARGET)); |
| |
| // Prevent literal pool emission during calculation of return address. |
| Assembler::BlockConstPoolScope block_const_pool(masm); |
| |
| // Push return address (accessible to GC through exit frame pc). |
| // Note that using pc with str is deprecated. |
| Label start; |
| __ bind(&start); |
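// Reading pc on ARM yields the address of the current instruction plus
// kPcLoadDelta, so ip ends up pointing just past the Jump below, which is
// where the called function eventually returns to via DirectCEntryStub.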
| __ add(ip, pc, Operand(Assembler::kInstrSize)); |
| __ str(ip, MemOperand(sp, 0)); |
| __ Jump(target); // Call the C++ function. |
| ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta, |
| masm->SizeOfCodeGeneratedSince(&start)); |
| __ VFPEnsureFPSCRState(r2); |
| } |
| |
| |
| void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, |
| Label* miss, |
| Label* done, |
| Register receiver, |
| Register properties, |
| Handle<Name> name, |
| Register scratch0) { |
| ASSERT(name->IsUniqueName()); |
// If the names in the slots probed for the hash value (probes 1 to
// kProbes - 1) are not equal to the given name, and the kProbes-th slot is
// unused (its name is the undefined value), then the hash table is
// guaranteed not to contain the property. This holds even if some slots
// hold deleted properties (their names are the hole value).
| for (int i = 0; i < kInlinedProbes; i++) { |
| // scratch0 points to properties hash. |
| // Compute the masked index: (hash + i + i * i) & mask. |
| Register index = scratch0; |
| // Capacity is smi 2^n. |
| __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); |
| __ sub(index, index, Operand(1)); |
| __ and_(index, index, Operand( |
| Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); |
| |
| // Scale the index by multiplying by the entry size. |
| ASSERT(NameDictionary::kEntrySize == 3); |
| __ add(index, index, Operand(index, LSL, 1)); // index *= 3. |
| |
| Register entity_name = scratch0; |
// Finding undefined in this slot means the name is not in the table.
| ASSERT_EQ(kSmiTagSize, 1); |
| Register tmp = properties; |
| __ add(tmp, properties, Operand(index, LSL, 1)); |
| __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
| |
| ASSERT(!tmp.is(entity_name)); |
| __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); |
| __ cmp(entity_name, tmp); |
| __ b(eq, done); |
| |
| // Load the hole ready for use below: |
| __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); |
| |
| // Stop if found the property. |
| __ cmp(entity_name, Operand(Handle<Name>(name))); |
| __ b(eq, miss); |
| |
| Label good; |
| __ cmp(entity_name, tmp); |
| __ b(eq, &good); |
| |
| // Check if the entry name is not a unique name. |
| __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); |
| __ ldrb(entity_name, |
| FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); |
| __ JumpIfNotUniqueName(entity_name, miss); |
| __ bind(&good); |
| |
| // Restore the properties. |
| __ ldr(properties, |
| FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| } |
| |
| const int spill_mask = |
| (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() | |
| r2.bit() | r1.bit() | r0.bit()); |
| |
| __ stm(db_w, sp, spill_mask); |
| __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| __ mov(r1, Operand(Handle<Name>(name))); |
| NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); |
| __ CallStub(&stub); |
| __ cmp(r0, Operand::Zero()); |
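// r0 is zero when the name was not found, i.e. the negative lookup
// succeeded; the flags set here survive the register restore below.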
| __ ldm(ia_w, sp, spill_mask); |
| |
| __ b(eq, done); |
| __ b(ne, miss); |
| } |
| |
| |
| // Probe the name dictionary in the |elements| register. Jump to the |
| // |done| label if a property with the given name is found. Jump to |
| // the |miss| label otherwise. |
| // If lookup was successful |scratch2| will be equal to elements + 4 * index. |
| void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, |
| Label* miss, |
| Label* done, |
| Register elements, |
| Register name, |
| Register scratch1, |
| Register scratch2) { |
| ASSERT(!elements.is(scratch1)); |
| ASSERT(!elements.is(scratch2)); |
| ASSERT(!name.is(scratch1)); |
| ASSERT(!name.is(scratch2)); |
| |
| __ AssertName(name); |
| |
| // Compute the capacity mask. |
| __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); |
| __ SmiUntag(scratch1); |
| __ sub(scratch1, scratch1, Operand(1)); |
| |
| // Generate an unrolled loop that performs a few probes before |
| // giving up. Measurements done on Gmail indicate that 2 probes |
| // cover ~93% of loads from dictionaries. |
| for (int i = 0; i < kInlinedProbes; i++) { |
| // Compute the masked index: (hash + i + i * i) & mask. |
| __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); |
| if (i > 0) { |
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following 'and' instruction.
| ASSERT(NameDictionary::GetProbeOffset(i) < |
| 1 << (32 - Name::kHashFieldOffset)); |
| __ add(scratch2, scratch2, Operand( |
| NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
| } |
| __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); |
| |
    // Scale the index by multiplying by the entry size.
| ASSERT(NameDictionary::kEntrySize == 3); |
| // scratch2 = scratch2 * 3. |
| __ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); |
| |
| // Check if the key is identical to the name. |
| __ add(scratch2, elements, Operand(scratch2, LSL, 2)); |
| __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset)); |
| __ cmp(name, Operand(ip)); |
| __ b(eq, done); |
| } |
| |
| const int spill_mask = |
| (lr.bit() | r6.bit() | r5.bit() | r4.bit() | |
| r3.bit() | r2.bit() | r1.bit() | r0.bit()) & |
| ~(scratch1.bit() | scratch2.bit()); |
| |
| __ stm(db_w, sp, spill_mask); |
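  // The stub expects the dictionary in r0 and the name in r1; the move order
  // below avoids clobbering the name when it is already in r0. On a hit the
  // stub leaves the entry's address in r2, which is copied into scratch2
  // before the ldm restores the spilled registers (scratch2 is excluded from
  // the spill mask).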
| if (name.is(r0)) { |
| ASSERT(!elements.is(r1)); |
| __ Move(r1, name); |
| __ Move(r0, elements); |
| } else { |
| __ Move(r0, elements); |
| __ Move(r1, name); |
| } |
| NameDictionaryLookupStub stub(POSITIVE_LOOKUP); |
| __ CallStub(&stub); |
| __ cmp(r0, Operand::Zero()); |
| __ mov(scratch2, Operand(r2)); |
| __ ldm(ia_w, sp, spill_mask); |
| |
| __ b(ne, done); |
| __ b(eq, miss); |
| } |
| |
| |
| void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { |
| // This stub overrides SometimesSetsUpAFrame() to return false. That means |
| // we cannot call anything that could cause a GC from this stub. |
  // Registers:
  //  result (r0): output register; aliases dictionary.
  //  dictionary (r0): NameDictionary to probe.
  //  key (r1): name to look for.
  //  index (r2): on a hit, holds dictionary plus the byte offset of the
  //              matching entry.
  // Returns:
  //  result is zero if the lookup failed, non-zero otherwise.
| |
| Register result = r0; |
| Register dictionary = r0; |
| Register key = r1; |
| Register index = r2; |
| Register mask = r3; |
| Register hash = r4; |
| Register undefined = r5; |
| Register entry_key = r6; |
| |
| Label in_dictionary, maybe_in_dictionary, not_in_dictionary; |
| |
| __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset)); |
| __ SmiUntag(mask); |
| __ sub(mask, mask, Operand(1)); |
| |
| __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
| |
| __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
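  // The inlined fast paths (GenerateNegativeLookup / GeneratePositiveLookup)
  // have already performed the first kInlinedProbes probes, so this
  // out-of-line loop continues from probe kInlinedProbes.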
| |
| for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
| // Compute the masked index: (hash + i + i * i) & mask. |
    // Capacity is a smi and always a power of two.
| if (i > 0) { |
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the following 'and' instruction.
| ASSERT(NameDictionary::GetProbeOffset(i) < |
| 1 << (32 - Name::kHashFieldOffset)); |
| __ add(index, hash, Operand( |
| NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
| } else { |
| __ mov(index, Operand(hash)); |
| } |
| __ and_(index, mask, Operand(index, LSR, Name::kHashShift)); |
| |
| // Scale the index by multiplying by the entry size. |
| ASSERT(NameDictionary::kEntrySize == 3); |
| __ add(index, index, Operand(index, LSL, 1)); // index *= 3. |
| |
| ASSERT_EQ(kSmiTagSize, 1); |
| __ add(index, dictionary, Operand(index, LSL, 2)); |
| __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
| |
| // Having undefined at this place means the name is not contained. |
| __ cmp(entry_key, Operand(undefined)); |
| __ b(eq, ¬_in_dictionary); |
| |
    // Stop if we found the property.
| __ cmp(entry_key, Operand(key)); |
| __ b(eq, &in_dictionary); |
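    // On the last probe a miss falls straight through to maybe_in_dictionary
    // below, so the unique-name check can be skipped for it.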
| |
| if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { |
| // Check if the entry name is not a unique name. |
| __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); |
| __ ldrb(entry_key, |
| FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); |
| __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary); |
| } |
| } |
| |
| __ bind(&maybe_in_dictionary); |
  // If we are doing a negative lookup, then probing failure should be
  // treated as a lookup success. For a positive lookup, probing failure
  // should be treated as a lookup failure.
| if (mode_ == POSITIVE_LOOKUP) { |
| __ mov(result, Operand::Zero()); |
| __ Ret(); |
| } |
| |
| __ bind(&in_dictionary); |
| __ mov(result, Operand(1)); |
| __ Ret(); |
| |
| __ bind(¬_in_dictionary); |
| __ mov(result, Operand::Zero()); |
| __ Ret(); |
| } |
| |
| |
| struct AheadOfTimeWriteBarrierStubList { |
| Register object, value, address; |
| RememberedSetAction action; |
| }; |
| |
| |
| #define REG(Name) { kRegister_ ## Name ## _Code } |
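// REG expands to an aggregate initializer carrying the register's code, so
// the stub list below can be built as a static constant table of Registers.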
| |
| static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { |
| // Used in RegExpExecStub. |
| { REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET }, |
| // Used in CompileArrayPushCall. |
| // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. |
| // Also used in KeyedStoreIC::GenerateGeneric. |
| { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET }, |
| // Used in CompileStoreGlobal. |
| { REG(r4), REG(r1), REG(r2), OMIT_REMEMBERED_SET }, |
| // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField. |
| { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET }, |
| { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET }, |
| // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField. |
| { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET }, |
| { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET }, |
| // KeyedStoreStubCompiler::GenerateStoreFastElement. |
| { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET }, |
| { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET }, |
| // ElementsTransitionGenerator::GenerateMapChangeElementTransition |
| // and ElementsTransitionGenerator::GenerateSmiToDouble |
| // and ElementsTransitionGenerator::GenerateDoubleToObject |
| { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET }, |
| { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET }, |
| // ElementsTransitionGenerator::GenerateDoubleToObject |
| { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET }, |
| { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET }, |
| // StoreArrayLiteralElementStub::Generate |
| { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET }, |
| // FastNewClosureStub::Generate |
| { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET }, |
| // StringAddStub::Generate |
| { REG(r7), REG(r1), REG(r4), EMIT_REMEMBERED_SET }, |
| { REG(r7), REG(r0), REG(r4), EMIT_REMEMBERED_SET }, |
| // Null termination. |
| { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET} |
| }; |
| |
| #undef REG |
| |
| |
| bool RecordWriteStub::IsPregenerated() { |
| for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; |
| !entry->object.is(no_reg); |
| entry++) { |
| if (object_.is(entry->object) && |
| value_.is(entry->value) && |
| address_.is(entry->address) && |
| remembered_set_action_ == entry->action && |
| save_fp_regs_mode_ == kDontSaveFPRegs) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| |
| void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( |
| Isolate* isolate) { |
| StoreBufferOverflowStub stub1(kDontSaveFPRegs); |
| stub1.GetCode(isolate)->set_is_pregenerated(true); |
| // Hydrogen code stubs need stub2 at snapshot time. |
| StoreBufferOverflowStub stub2(kSaveFPRegs); |
| stub2.GetCode(isolate)->set_is_pregenerated(true); |
| } |
| |
| |
| void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { |
| for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; |
| !entry->object.is(no_reg); |
| entry++) { |
| RecordWriteStub stub(entry->object, |
| entry->value, |
| entry->address, |
| entry->action, |
| kDontSaveFPRegs); |
| stub.GetCode(isolate)->set_is_pregenerated(true); |
| } |
| } |
| |
| |
| bool CodeStub::CanUseFPRegisters() { |
| return true; // VFP2 is a base requirement for V8 |
| } |
| |
| |
// Takes the input in 3 registers: address_, value_ and object_. A pointer to
// the value has just been written into the object, and this stub makes sure
// the GC is kept informed. The word in the object where the value has been
// written is in the address register.
| void RecordWriteStub::Generate(MacroAssembler* masm) { |
| Label skip_to_incremental_noncompacting; |
| Label skip_to_incremental_compacting; |
| |
  // The first two instructions are generated with labels so as to get the
  // offset fixed up correctly by the bind(Label*) call. We patch them back
  // and forth between a compare instruction (a nop in this position) and the
  // real branch when we start and stop incremental heap marking.
| // See RecordWriteStub::Patch for details. |
| { |
| // Block literal pool emission, as the position of these two instructions |
| // is assumed by the patching code. |
| Assembler::BlockConstPoolScope block_const_pool(masm); |
| __ b(&skip_to_incremental_noncompacting); |
| __ b(&skip_to_incremental_compacting); |
| } |
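  // In the initial STORE_BUFFER_ONLY mode both branches above are patched
  // into nops (see the PatchBranchIntoNop calls at the end of this function),
  // so execution falls through to the store buffer code below.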
| |
| if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
| __ RememberedSetHelper(object_, |
| address_, |
| value_, |
| save_fp_regs_mode_, |
| MacroAssembler::kReturnAtEnd); |
| } |
| __ Ret(); |
| |
| __ bind(&skip_to_incremental_noncompacting); |
| GenerateIncremental(masm, INCREMENTAL); |
| |
| __ bind(&skip_to_incremental_compacting); |
| GenerateIncremental(masm, INCREMENTAL_COMPACTION); |
| |
| // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. |
| // Will be checked in IncrementalMarking::ActivateGeneratedStub. |
| ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); |
| ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); |
| PatchBranchIntoNop(masm, 0); |
| PatchBranchIntoNop(masm, Assembler::kInstrSize); |
| } |
| |
| |
| void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { |
| regs_.Save(masm); |
| |
| if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
| Label dont_need_remembered_set; |
| |
| __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); |
| __ JumpIfNotInNewSpace(regs_.scratch0(), // Value. |
| regs_.scratch0(), |
| &dont_need_remembered_set); |
| |
| __ CheckPageFlag(regs_.object(), |
| regs_.scratch0(), |
| 1 << MemoryChunk::SCAN_ON_SCAVENGE, |
| ne, |
| &dont_need_remembered_set); |
| |
| // First notify the incremental marker if necessary, then update the |
| // remembered set. |
| CheckNeedsToInformIncrementalMarker( |
| masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); |
| InformIncrementalMarker(masm, mode); |
| regs_.Restore(masm); |
| __ RememberedSetHelper(object_, |
| address_, |
| value_, |
| save_fp_regs_mode_, |
| MacroAssembler::kReturnAtEnd); |
| |
| __ bind(&dont_need_remembered_set); |
| } |
| |
| CheckNeedsToInformIncrementalMarker( |
| masm, kReturnOnNoNeedToInformIncrementalMarker, mode); |
| InformIncrementalMarker(masm, mode); |
| regs_.Restore(masm); |
| __ Ret(); |
| } |
| |
| |
| void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { |
| regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); |
| int argument_count = 3; |
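  // The record write C function takes three arguments: the object, the slot
  // address within it, and the isolate.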
| __ PrepareCallCFunction(argument_count, regs_.scratch0()); |
| Register address = |
| r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); |
| ASSERT(!address.is(regs_.object())); |
| ASSERT(!address.is(r0)); |
| __ Move(address, regs_.address()); |
| __ Move(r0, regs_.object()); |
| __ Move(r1, address); |
| __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate()))); |
| |
| AllowExternalCallThatCantCauseGC scope(masm); |
| if (mode == INCREMENTAL_COMPACTION) { |
| __ CallCFunction( |
| ExternalReference::incremental_evacuation_record_write_function( |
| masm->isolate()), |
| argument_count); |
| } else { |
| ASSERT(mode == INCREMENTAL); |
| __ CallCFunction( |
| ExternalReference::incremental_marking_record_write_function( |
| masm->isolate()), |
| argument_count); |
| } |
| regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); |
| } |
| |
| |
| void RecordWriteStub::CheckNeedsToInformIncrementalMarker( |
| MacroAssembler* masm, |
| OnNoNeedToInformIncrementalMarker on_no_need, |
| Mode mode) { |
| Label on_black; |
| Label need_incremental; |
| Label need_incremental_pop_scratch; |
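  // Each page keeps a write barrier counter; decrement it and, once it goes
  // negative, fall back to informing the incremental marker.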
| |
| __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); |
| __ ldr(regs_.scratch1(), |
| MemOperand(regs_.scratch0(), |
| MemoryChunk::kWriteBarrierCounterOffset)); |
| __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC); |
| __ str(regs_.scratch1(), |
| MemOperand(regs_.scratch0(), |
| MemoryChunk::kWriteBarrierCounterOffset)); |
| __ b(mi, &need_incremental); |
| |
| // Let's look at the color of the object: If it is not black we don't have |
| // to inform the incremental marker. |
| __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); |
| |
| regs_.Restore(masm); |
| if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { |
| __ RememberedSetHelper(object_, |
| address_, |
| value_, |
| save_fp_regs_mode_, |
| MacroAssembler::kReturnAtEnd); |
| } else { |
| __ Ret(); |
| } |
| |
| __ bind(&on_black); |
| |
| // Get the value from the slot. |
| __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); |
| |
| if (mode == INCREMENTAL_COMPACTION) { |
| Label ensure_not_white; |
| |
| __ CheckPageFlag(regs_.scratch0(), // Contains value. |
| regs_.scratch1(), // Scratch. |
| MemoryChunk::kEvacuationCandidateMask, |
| eq, |
| &ensure_not_white); |
| |
| __ CheckPageFlag(regs_.object(), |
| regs_.scratch1(), // Scratch. |
| MemoryChunk::kSkipEvacuationSlotsRecordingMask, |
| eq, |
| &need_incremental); |
| |
| __ bind(&ensure_not_white); |
| } |
| |
| // We need extra registers for this, so we push the object and the address |
| // register temporarily. |
| __ Push(regs_.object(), regs_.address()); |
| __ EnsureNotWhite(regs_.scratch0(), // The value. |
| regs_.scratch1(), // Scratch. |
| regs_.object(), // Scratch. |
| regs_.address(), // Scratch. |
| &need_incremental_pop_scratch); |
| __ Pop(regs_.object(), regs_.address()); |
| |
| regs_.Restore(masm); |
| if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { |
| __ RememberedSetHelper(object_, |
| address_, |
| value_, |
| save_fp_regs_mode_, |
| MacroAssembler::kReturnAtEnd); |
| } else { |
| __ Ret(); |
| } |
| |
| __ bind(&need_incremental_pop_scratch); |
| __ Pop(regs_.object(), regs_.address()); |
| |
| __ bind(&need_incremental); |
| |
| // Fall through when we need to inform the incremental marker. |
| } |
| |
| |
| void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { |
| // ----------- S t a t e ------------- |
| // -- r0 : element value to store |
| // -- r3 : element index as smi |
| // -- sp[0] : array literal index in function as smi |
| // -- sp[4] : array literal |
| // clobbers r1, r2, r4 |
| // ----------------------------------- |
| |
| Label element_done; |
| Label double_elements; |
| Label smi_element; |
| Label slow_elements; |
| Label fast_elements; |
| |
| // Get array literal index, array literal and its map. |
| __ ldr(r4, MemOperand(sp, 0 * kPointerSize)); |
| __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); |
| __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset)); |
| |
| __ CheckFastElements(r2, r5, &double_elements); |
| // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS |
| __ JumpIfSmi(r0, &smi_element); |
| __ CheckFastSmiElements(r2, r5, &fast_elements); |
| |
  // Storing into the array literal requires an elements transition. Call into
  // the runtime.
  __ bind(&slow_elements);
| __ Push(r1, r3, r0); |
| __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset)); |
| __ Push(r5, r4); |
| __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); |
| |
| // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. |
| __ bind(&fast_elements); |
| __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
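  // Convert the smi index in r3 into a byte offset into the elements array.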
| __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3)); |
| __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| __ str(r0, MemOperand(r6, 0)); |
| // Update the write barrier for the array store. |
| __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs, |
| EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
| __ Ret(); |
| |
| // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, |
| // and value is Smi. |
| __ bind(&smi_element); |
| __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
| __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3)); |
| __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); |
| __ Ret(); |
| |
| // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. |
| __ bind(&double_elements); |
| __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
| __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements); |
| __ Ret(); |
| } |
| |
| |
| void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
| CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); |
| __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| int parameter_count_offset = |
| StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
| __ ldr(r1, MemOperand(fp, parameter_count_offset)); |
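  // The recorded count does not include the receiver; in JS_FUNCTION_STUB_MODE
  // add one so the receiver slot is dropped from the caller's stack as well.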
| if (function_mode_ == JS_FUNCTION_STUB_MODE) { |
| __ add(r1, r1, Operand(1)); |
| } |
| masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); |
| __ mov(r1, Operand(r1, LSL, kPointerSizeLog2)); |
| __ add(sp, sp, r1); |
| __ Ret(); |
| } |
| |
| |
| void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { |
| if (masm->isolate()->function_entry_hook() != NULL) { |
| PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize); |
| AllowStubCallsScope allow_stub_calls(masm, true); |
| ProfileEntryHookStub stub; |
| __ push(lr); |
| __ CallStub(&stub); |
| __ pop(lr); |
| } |
| } |
| |
| |
| void ProfileEntryHookStub::Generate(MacroAssembler* masm) { |
| // The entry hook is a "push lr" instruction, followed by a call. |
| const int32_t kReturnAddressDistanceFromFunctionStart = |
| 3 * Assembler::kInstrSize; |
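  // That is, the "push lr" plus the two-instruction call sequence emitted
  // under the PredictableCodeSizeScope in MaybeCallEntryHook above.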
| |
| // This should contain all kCallerSaved registers. |
| const RegList kSavedRegs = |
| 1 << 0 | // r0 |
| 1 << 1 | // r1 |
| 1 << 2 | // r2 |
| 1 << 3 | // r3 |
| 1 << 5 | // r5 |
| 1 << 9; // r9 |
| // We also save lr, so the count here is one higher than the mask indicates. |
| const int32_t kNumSavedRegs = 7; |
| |
| ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved); |
| |
| // Save all caller-save registers as this may be called from anywhere. |
| __ stm(db_w, sp, kSavedRegs | lr.bit()); |
| |
| // Compute the function's address for the first argument. |
| __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart)); |
| |
| // The caller's return address is above the saved temporaries. |
| // Grab that for the second argument to the hook. |
| __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize)); |
| |
| // Align the stack if necessary. |
| int frame_alignment = masm->ActivationFrameAlignment(); |
| if (frame_alignment > kPointerSize) { |
| __ mov(r5, sp); |
| ASSERT(IsPowerOf2(frame_alignment)); |
| __ and_(sp, sp, Operand(-frame_alignment)); |
| } |
| |
| #if V8_HOST_ARCH_ARM |
| int32_t entry_hook = |
| reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook()); |
| __ mov(ip, Operand(entry_hook)); |
| #else |
| // Under the simulator we need to indirect the entry hook through a |
| // trampoline function at a known address. |
| ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); |
| __ mov(ip, Operand(ExternalReference(&dispatcher, |
| ExternalReference::BUILTIN_CALL, |
| masm->isolate()))); |
| #endif |
| __ Call(ip); |
| |
| // Restore the stack pointer if needed. |
| if (frame_alignment > kPointerSize) { |
| __ mov(sp, r5); |
| } |
| |
| // Also pop pc to get Ret(0). |
| __ ldm(ia_w, sp, kSavedRegs | pc.bit()); |
| } |
| |
| |
| template<class T> |
| static void CreateArrayDispatch(MacroAssembler* masm) { |
| int last_index = GetSequenceIndexFromFastElementsKind( |
| TERMINAL_FAST_ELEMENTS_KIND); |
| for (int i = 0; i <= last_index; ++i) { |
| Label next; |
| ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); |
| __ cmp(r3, Operand(kind)); |
| __ b(ne, &next); |
| T stub(kind); |
| __ TailCallStub(&stub); |
| __ bind(&next); |
| } |
| |
| // If we reached this point there is a problem. |
| __ Abort(kUnexpectedElementsKindInArrayConstructor); |
| } |
| |
| |
| static void CreateArrayDispatchOneArgument(MacroAssembler* masm) { |
| // r2 - type info cell |
| // r3 - kind |
| // r0 - number of arguments |
| // r1 - constructor? |
| // sp[0] - last argument |
| ASSERT(FAST_SMI_ELEMENTS == 0); |
| ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
| ASSERT(FAST_ELEMENTS == 2); |
| ASSERT(FAST_HOLEY_ELEMENTS == 3); |
| ASSERT(FAST_DOUBLE_ELEMENTS == 4); |
| ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); |
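  // The code below relies on this layout: each holey kind is its packed
  // counterpart plus one, so bit 0 distinguishes packed from holey.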
| |
  // Is the low bit set? If so, the kind is already holey and no transition is
  // needed.
| __ tst(r3, Operand(1)); |
| Label normal_sequence; |
| __ b(ne, &normal_sequence); |
| |
  // Look at the first argument.
| __ ldr(r5, MemOperand(sp, 0)); |
| __ cmp(r5, Operand::Zero()); |
| __ b(eq, &normal_sequence); |
| |
| // We are going to create a holey array, but our kind is non-holey. |
| // Fix kind and retry (only if we have an allocation site in the cell). |
| __ add(r3, r3, Operand(1)); |
| __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); |
| __ b(eq, &normal_sequence); |
| __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset)); |
| __ ldr(r5, FieldMemOperand(r5, 0)); |
| __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); |
| __ b(ne, &normal_sequence); |
| |
| // Save the resulting elements kind in type info |
| __ SmiTag(r3); |
| __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset)); |
| __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset)); |
| __ SmiUntag(r3); |
| |
| __ bind(&normal_sequence); |
| int last_index = GetSequenceIndexFromFastElementsKind( |
| TERMINAL_FAST_ELEMENTS_KIND); |
| for (int i = 0; i <= last_index; ++i) { |
| Label next; |
| ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); |
| __ cmp(r3, Operand(kind)); |
| __ b(ne, &next); |
| ArraySingleArgumentConstructorStub stub(kind); |
| __ TailCallStub(&stub); |
| __ bind(&next); |
| } |
| |
| // If we reached this point there is a problem. |
| __ Abort(kUnexpectedElementsKindInArrayConstructor); |
| } |
| |
| |
| template<class T> |
| static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { |
| int to_index = GetSequenceIndexFromFastElementsKind( |
| TERMINAL_FAST_ELEMENTS_KIND); |
| for (int i = 0; i <= to_index; ++i) { |
| ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); |
| T stub(kind); |
| stub.GetCode(isolate)->set_is_pregenerated(true); |
| if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { |
| T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES); |
| stub1.GetCode(isolate)->set_is_pregenerated(true); |
| } |
| } |
| } |
| |
| |
| void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { |
| ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( |
| isolate); |
| ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( |
| isolate); |
| ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( |
| isolate); |
| } |
| |
| |
| void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime( |
| Isolate* isolate) { |
| ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; |
| for (int i = 0; i < 2; i++) { |
| // For internal arrays we only need a few things |
| InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); |
| stubh1.GetCode(isolate)->set_is_pregenerated(true); |
| InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); |
| stubh2.GetCode(isolate)->set_is_pregenerated(true); |
| InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); |
| stubh3.GetCode(isolate)->set_is_pregenerated(true); |
| } |
| } |
| |
| |
| void ArrayConstructorStub::Generate(MacroAssembler* masm) { |
| // ----------- S t a t e ------------- |
| // -- r0 : argc (only if argument_count_ == ANY) |
| // -- r1 : constructor |
| // -- r2 : type info cell |
| // -- sp[0] : return address |
| // -- sp[4] : last argument |
| // ----------------------------------- |
| if (FLAG_debug_code) { |
| // The array construct code is only set for the global and natives |
| // builtin Array functions which always have maps. |
| |
| // Initial map for the builtin Array function should be a map. |
| __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); |
    // The smi tag check below will catch both a NULL and a Smi.
| __ tst(r3, Operand(kSmiTagMask)); |
| __ Assert(ne, kUnexpectedInitialMapForArrayFunction); |
| __ CompareObjectType(r3, r3, r4, MAP_TYPE); |
| __ Assert(eq, kUnexpectedInitialMapForArrayFunction); |
| |
    // We should either have undefined in r2 or a valid cell.
| Label okay_here; |
| Handle<Map> cell_map = masm->isolate()->factory()->cell_map(); |
| __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); |
| __ b(eq, &okay_here); |
| __ ldr(r3, FieldMemOperand(r2, 0)); |
| __ cmp(r3, Operand(cell_map)); |
| __ Assert(eq, kExpectedPropertyCellInRegisterEbx); |
| __ bind(&okay_here); |
| } |
| |
| Label no_info, switch_ready; |
| // Get the elements kind and case on that. |
| __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); |
| __ b(eq, &no_info); |
| __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset)); |
| |
| // The type cell may have undefined in its value. |
| __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); |
| __ b(eq, &no_info); |
| |
| // The type cell has either an AllocationSite or a JSFunction |
| __ ldr(r4, FieldMemOperand(r3, 0)); |
| __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex); |
| __ b(ne, &no_info); |
| |
| __ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset)); |
| __ SmiUntag(r3); |
| __ jmp(&switch_ready); |
| __ bind(&no_info); |
| __ mov(r3, Operand(GetInitialFastElementsKind())); |
| __ bind(&switch_ready); |
| |
| if (argument_count_ == ANY) { |
| Label not_zero_case, not_one_case; |
| __ tst(r0, r0); |
| __ b(ne, ¬_zero_case); |
| CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); |
| |
| __ bind(¬_zero_case); |
| __ cmp(r0, Operand(1)); |
| __ b(gt, ¬_one_case); |
| CreateArrayDispatchOneArgument(masm); |
| |
| __ bind(¬_one_case); |
| CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); |
| } else if (argument_count_ == NONE) { |
| CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); |
| } else if (argument_count_ == ONE) { |
| CreateArrayDispatchOneArgument(masm); |
| } else if (argument_count_ == MORE_THAN_ONE) { |
| CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); |
| } else { |
| UNREACHABLE(); |
| } |
| } |
| |
| |
| void InternalArrayConstructorStub::GenerateCase( |
| MacroAssembler* masm, ElementsKind kind) { |
| Label not_zero_case, not_one_case; |
| Label normal_sequence; |
| |
| __ tst(r0, r0); |
| __ b(ne, ¬_zero_case); |
| InternalArrayNoArgumentConstructorStub stub0(kind); |
| __ TailCallStub(&stub0); |
| |
| __ bind(¬_zero_case); |
| __ cmp(r0, Operand(1)); |
| __ b(gt, ¬_one_case); |
| |
| if (IsFastPackedElementsKind(kind)) { |
    // We might need to create a holey array; look at the first argument.
| __ ldr(r3, MemOperand(sp, 0)); |
| __ cmp(r3, Operand::Zero()); |
| __ b(eq, &normal_sequence); |
| |
| InternalArraySingleArgumentConstructorStub |
| stub1_holey(GetHoleyElementsKind(kind)); |
| __ TailCallStub(&stub1_holey); |
| } |
| |
| __ bind(&normal_sequence); |
| InternalArraySingleArgumentConstructorStub stub1(kind); |
| __ TailCallStub(&stub1); |
| |
| __ bind(¬_one_case); |
| InternalArrayNArgumentsConstructorStub stubN(kind); |
| __ TailCallStub(&stubN); |
| } |
| |
| |
| void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { |
| // ----------- S t a t e ------------- |
| // -- r0 : argc |
| // -- r1 : constructor |
| // -- sp[0] : return address |
| // -- sp[4] : last argument |
| // ----------------------------------- |
| |
| if (FLAG_debug_code) { |
| // The array construct code is only set for the global and natives |
| // builtin Array functions which always have maps. |
| |
| // Initial map for the builtin Array function should be a map. |
| __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); |
    // The smi tag check below will catch both a NULL and a Smi.
| __ tst(r3, Operand(kSmiTagMask)); |
| __ Assert(ne, kUnexpectedInitialMapForArrayFunction); |
| __ CompareObjectType(r3, r3, r4, MAP_TYPE); |
| __ Assert(eq, kUnexpectedInitialMapForArrayFunction); |
| } |
| |
| // Figure out the right elements kind |
| __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); |
  // Load the map's "bit field 2" into r3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
| __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset)); |
| // Retrieve elements_kind from bit field 2. |
| __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount); |
| |
| if (FLAG_debug_code) { |
| Label done; |
| __ cmp(r3, Operand(FAST_ELEMENTS)); |
| __ b(eq, &done); |
| __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS)); |
| __ Assert(eq, |
| kInvalidElementsKindForInternalArrayOrInternalPackedArray); |
| __ bind(&done); |
| } |
| |
| Label fast_elements_case; |
| __ cmp(r3, Operand(FAST_ELEMENTS)); |
| __ b(eq, &fast_elements_case); |
| GenerateCase(masm, FAST_HOLEY_ELEMENTS); |
| |
| __ bind(&fast_elements_case); |
| GenerateCase(masm, FAST_ELEMENTS); |
| } |
| |
| |
| #undef __ |
| |
| } } // namespace v8::internal |
| |
| #endif // V8_TARGET_ARCH_ARM |