// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Review notes:
//
// - The use of macros in these inline functions may seem superfluous
// but it is absolutely needed to make sure gcc generates optimal
// code. gcc is not happy when attempting to inline too deep.
//
#ifndef V8_OBJECTS_INL_H_
#define V8_OBJECTS_INL_H_
#include "src/base/atomicops.h"
#include "src/elements.h"
#include "src/objects.h"
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/field-index-inl.h"
#include "src/heap.h"
#include "src/isolate.h"
#include "src/heap-inl.h"
#include "src/property.h"
#include "src/spaces.h"
#include "src/store-buffer.h"
#include "src/v8memory.h"
#include "src/factory.h"
#include "src/incremental-marking.h"
#include "src/transitions-inl.h"
#include "src/objects-visiting.h"
#include "src/lookup.h"
namespace v8 {
namespace internal {
PropertyDetails::PropertyDetails(Smi* smi) {
value_ = smi->value();
}
Smi* PropertyDetails::AsSmi() const {
// Ensure the upper 2 bits have the same value by sign extending it. This is
// necessary to be able to use the 31st bit of the property details.
int value = value_ << 1;
return Smi::FromInt(value >> 1);
}
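// Worked example for the shift pair in AsSmi() above (illustrative only, not
// part of the original comment): with a 31-bit Smi payload, bit 30 of value_
// would otherwise be lost. If value_ == 0x40000001 (bit 30 set), then
// value_ << 1 == 0x80000002 and the arithmetic shift back gives 0xC0000001,
// so bits 30 and 31 now agree and the low 31 bits of the details survive the
// round trip through a Smi.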
PropertyDetails PropertyDetails::AsDeleted() const {
Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
return PropertyDetails(smi);
}
#define TYPE_CHECKER(type, instancetype) \
bool Object::Is##type() { \
return Object::IsHeapObject() && \
HeapObject::cast(this)->map()->instance_type() == instancetype; \
}
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
SLOW_ASSERT(object->Is##type()); \
return reinterpret_cast<type*>(object); \
}
#define INT_ACCESSORS(holder, name, offset) \
int holder::name() { return READ_INT_FIELD(this, offset); } \
void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
#define ACCESSORS(holder, name, type, offset) \
type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
WRITE_FIELD(this, offset, value); \
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
}
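// For illustration, ACCESSORS(JSObject, properties, FixedArray,
// kPropertiesOffset) (used further down in this file) expands to roughly:
//
//   FixedArray* JSObject::properties() {
//     return FixedArray::cast(READ_FIELD(this, kPropertiesOffset));
//   }
//   void JSObject::set_properties(FixedArray* value, WriteBarrierMode mode) {
//     WRITE_FIELD(this, kPropertiesOffset, value);
//     CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPropertiesOffset, value,
//                               mode);
//   }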
// Getter that returns a tagged Smi and setter that writes a tagged Smi.
#define ACCESSORS_TO_SMI(holder, name, offset) \
Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); } \
void holder::set_##name(Smi* value, WriteBarrierMode mode) { \
WRITE_FIELD(this, offset, value); \
}
// Getter that returns a Smi as an int and setter that writes an int as a Smi.
#define SMI_ACCESSORS(holder, name, offset) \
int holder::name() { \
Object* value = READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
} \
void holder::set_##name(int value) { \
WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
int holder::synchronized_##name() { \
Object* value = ACQUIRE_READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
} \
void holder::synchronized_set_##name(int value) { \
RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
#define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \
int holder::nobarrier_##name() { \
Object* value = NOBARRIER_READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
} \
void holder::nobarrier_set_##name(int value) { \
NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
#define BOOL_GETTER(holder, field, name, offset) \
bool holder::name() { \
return BooleanBit::get(field(), offset); \
}
#define BOOL_ACCESSORS(holder, field, name, offset) \
bool holder::name() { \
return BooleanBit::get(field(), offset); \
} \
void holder::set_##name(bool value) { \
set_##field(BooleanBit::set(field(), offset, value)); \
}
bool Object::IsFixedArrayBase() {
return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() ||
IsFixedTypedArrayBase() || IsExternalArray();
}
// External objects are not extensible, so the map check is enough.
bool Object::IsExternal() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->external_map();
}
bool Object::IsAccessorInfo() {
return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo();
}
bool Object::IsSmi() {
return HAS_SMI_TAG(this);
}
bool Object::IsHeapObject() {
return Internals::HasHeapObjectTag(this);
}
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
bool Object::IsString() {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
}
bool Object::IsName() {
return IsString() || IsSymbol();
}
bool Object::IsUniqueName() {
return IsInternalizedString() || IsSymbol();
}
bool Object::IsSpecObject() {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
}
bool Object::IsSpecFunction() {
if (!Object::IsHeapObject()) return false;
InstanceType type = HeapObject::cast(this)->map()->instance_type();
return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
}
bool Object::IsTemplateInfo() {
return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}
bool Object::IsInternalizedString() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
STATIC_ASSERT(kNotInternalizedTag != 0);
return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
(kStringTag | kInternalizedTag);
}
bool Object::IsConsString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsCons();
}
bool Object::IsSlicedString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSliced();
}
bool Object::IsSeqString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential();
}
bool Object::IsSeqOneByteString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
String::cast(this)->IsOneByteRepresentation();
}
bool Object::IsSeqTwoByteString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
String::cast(this)->IsTwoByteRepresentation();
}
bool Object::IsExternalString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal();
}
bool Object::IsExternalAsciiString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
String::cast(this)->IsOneByteRepresentation();
}
bool Object::IsExternalTwoByteString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
String::cast(this)->IsTwoByteRepresentation();
}
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
return IsFixedArray() || IsFixedDoubleArray() || IsExternalArray() ||
IsFixedTypedArrayBase();
}
Handle<Object> Object::NewStorageFor(Isolate* isolate,
Handle<Object> object,
Representation representation) {
if (representation.IsSmi() && object->IsUninitialized()) {
return handle(Smi::FromInt(0), isolate);
}
if (!representation.IsDouble()) return object;
if (object->IsUninitialized()) {
return isolate->factory()->NewHeapNumber(0);
}
return isolate->factory()->NewHeapNumber(object->Number());
}
StringShape::StringShape(String* str)
: type_(str->map()->instance_type()) {
set_valid();
ASSERT((type_ & kIsNotStringMask) == kStringTag);
}
StringShape::StringShape(Map* map)
: type_(map->instance_type()) {
set_valid();
ASSERT((type_ & kIsNotStringMask) == kStringTag);
}
StringShape::StringShape(InstanceType t)
: type_(static_cast<uint32_t>(t)) {
set_valid();
ASSERT((type_ & kIsNotStringMask) == kStringTag);
}
bool StringShape::IsInternalized() {
ASSERT(valid());
STATIC_ASSERT(kNotInternalizedTag != 0);
return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
(kStringTag | kInternalizedTag);
}
bool String::IsOneByteRepresentation() {
uint32_t type = map()->instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
bool String::IsTwoByteRepresentation() {
uint32_t type = map()->instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
bool String::IsOneByteRepresentationUnderneath() {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
ASSERT(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
case kOneByteStringTag:
return true;
case kTwoByteStringTag:
return false;
default: // Cons or sliced string. Need to go deeper.
return GetUnderlying()->IsOneByteRepresentation();
}
}
bool String::IsTwoByteRepresentationUnderneath() {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
ASSERT(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
case kOneByteStringTag:
return false;
case kTwoByteStringTag:
return true;
default: // Cons or sliced string. Need to go deeper.
return GetUnderlying()->IsTwoByteRepresentation();
}
}
bool String::HasOnlyOneByteChars() {
uint32_t type = map()->instance_type();
return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
IsOneByteRepresentation();
}
bool StringShape::IsCons() {
return (type_ & kStringRepresentationMask) == kConsStringTag;
}
bool StringShape::IsSliced() {
return (type_ & kStringRepresentationMask) == kSlicedStringTag;
}
bool StringShape::IsIndirect() {
return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
}
bool StringShape::IsExternal() {
return (type_ & kStringRepresentationMask) == kExternalStringTag;
}
bool StringShape::IsSequential() {
return (type_ & kStringRepresentationMask) == kSeqStringTag;
}
StringRepresentationTag StringShape::representation_tag() {
uint32_t tag = (type_ & kStringRepresentationMask);
return static_cast<StringRepresentationTag>(tag);
}
uint32_t StringShape::encoding_tag() {
return type_ & kStringEncodingMask;
}
uint32_t StringShape::full_representation_tag() {
return (type_ & (kStringRepresentationMask | kStringEncodingMask));
}
STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
Internals::kFullStringRepresentationMask);
STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
Internals::kStringEncodingMask);
bool StringShape::IsSequentialAscii() {
return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}
bool StringShape::IsSequentialTwoByte() {
return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
}
bool StringShape::IsExternalAscii() {
return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}
STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
Internals::kExternalAsciiRepresentationTag);
STATIC_ASSERT(v8::String::ASCII_ENCODING == kOneByteStringTag);
bool StringShape::IsExternalTwoByte() {
return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
}
STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
Internals::kExternalTwoByteRepresentationTag);
STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
uc32 FlatStringReader::Get(int index) {
ASSERT(0 <= index && index <= length_);
if (is_ascii_) {
return static_cast<const byte*>(start_)[index];
} else {
return static_cast<const uc16*>(start_)[index];
}
}
Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
return key->AsHandle(isolate);
}
Handle<Object> MapCacheShape::AsHandle(Isolate* isolate, HashTableKey* key) {
return key->AsHandle(isolate);
}
Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
HashTableKey* key) {
return key->AsHandle(isolate);
}
Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
HashTableKey* key) {
return key->AsHandle(isolate);
}
template <typename Char>
class SequentialStringKey : public HashTableKey {
public:
explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
virtual uint32_t Hash() V8_OVERRIDE {
hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
string_.length(),
seed_);
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
return String::cast(other)->Hash();
}
Vector<const Char> string_;
uint32_t hash_field_;
uint32_t seed_;
};
class OneByteStringKey : public SequentialStringKey<uint8_t> {
public:
OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
: SequentialStringKey<uint8_t>(str, seed) { }
virtual bool IsMatch(Object* string) V8_OVERRIDE {
return String::cast(string)->IsOneByteEqualTo(string_);
}
virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
};
template<class Char>
class SubStringKey : public HashTableKey {
public:
SubStringKey(Handle<String> string, int from, int length)
: string_(string), from_(from), length_(length) {
if (string_->IsSlicedString()) {
string_ = Handle<String>(Unslice(*string_, &from_));
}
ASSERT(string_->IsSeqString() || string->IsExternalString());
}
virtual uint32_t Hash() V8_OVERRIDE {
ASSERT(length_ >= 0);
ASSERT(from_ + length_ <= string_->length());
const Char* chars = GetChars() + from_;
hash_field_ = StringHasher::HashSequentialString(
chars, length_, string_->GetHeap()->HashSeed());
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
return String::cast(other)->Hash();
}
virtual bool IsMatch(Object* string) V8_OVERRIDE;
virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
private:
const Char* GetChars();
String* Unslice(String* string, int* offset) {
while (string->IsSlicedString()) {
SlicedString* sliced = SlicedString::cast(string);
*offset += sliced->offset();
string = sliced->parent();
}
return string;
}
Handle<String> string_;
int from_;
int length_;
uint32_t hash_field_;
};
class TwoByteStringKey : public SequentialStringKey<uc16> {
public:
explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
: SequentialStringKey<uc16>(str, seed) { }
virtual bool IsMatch(Object* string) V8_OVERRIDE {
return String::cast(string)->IsTwoByteEqualTo(string_);
}
virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
};
// Utf8StringKey carries a vector of chars as key.
class Utf8StringKey : public HashTableKey {
public:
explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
virtual bool IsMatch(Object* string) V8_OVERRIDE {
return String::cast(string)->IsUtf8EqualTo(string_);
}
virtual uint32_t Hash() V8_OVERRIDE {
if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
return String::cast(other)->Hash();
}
virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
if (hash_field_ == 0) Hash();
return isolate->factory()->NewInternalizedStringFromUtf8(
string_, chars_, hash_field_);
}
Vector<const char> string_;
uint32_t hash_field_;
int chars_; // Caches the number of characters when computing the hash code.
uint32_t seed_;
};
bool Object::IsNumber() {
return IsSmi() || IsHeapNumber();
}
TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
bool Object::IsFiller() {
if (!Object::IsHeapObject()) return false;
InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
bool Object::IsExternalArray() {
if (!Object::IsHeapObject())
return false;
InstanceType instance_type =
HeapObject::cast(this)->map()->instance_type();
return (instance_type >= FIRST_EXTERNAL_ARRAY_TYPE &&
instance_type <= LAST_EXTERNAL_ARRAY_TYPE);
}
#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
TYPE_CHECKER(External##Type##Array, EXTERNAL_##TYPE##_ARRAY_TYPE) \
TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
#undef TYPED_ARRAY_TYPE_CHECKER
bool Object::IsFixedTypedArrayBase() {
if (!Object::IsHeapObject()) return false;
InstanceType instance_type =
HeapObject::cast(this)->map()->instance_type();
return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
}
bool Object::IsJSReceiver() {
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
return IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
}
bool Object::IsJSObject() {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return IsHeapObject() &&
HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
}
bool Object::IsJSProxy() {
if (!Object::IsHeapObject()) return false;
return HeapObject::cast(this)->map()->IsJSProxyMap();
}
TYPE_CHECKER(JSFunctionProxy, JS_FUNCTION_PROXY_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE)
bool Object::IsJSWeakCollection() {
return IsJSWeakMap() || IsJSWeakSet();
}
bool Object::IsDescriptorArray() {
return IsFixedArray();
}
bool Object::IsTransitionArray() {
return IsFixedArray();
}
bool Object::IsDeoptimizationInputData() {
// Must be a fixed array.
if (!IsFixedArray()) return false;
// There's no sure way to detect the difference between a fixed array and
// a deoptimization data array. Since this is only used for asserts, we can
// check that the length is zero or else the fixed size plus a multiple of
// the entry size.
int length = FixedArray::cast(this)->length();
if (length == 0) return true;
length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
return length >= 0 &&
length % DeoptimizationInputData::kDeoptEntrySize == 0;
}
bool Object::IsDeoptimizationOutputData() {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a deoptimization data array. Since this is only used for asserts, we can check
// that the length is plausible though.
if (FixedArray::cast(this)->length() % 2 != 0) return false;
return true;
}
bool Object::IsDependentCode() {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a dependent codes array.
return true;
}
bool Object::IsContext() {
if (!Object::IsHeapObject()) return false;
Map* map = HeapObject::cast(this)->map();
Heap* heap = map->GetHeap();
return (map == heap->function_context_map() ||
map == heap->catch_context_map() ||
map == heap->with_context_map() ||
map == heap->native_context_map() ||
map == heap->block_context_map() ||
map == heap->module_context_map() ||
map == heap->global_context_map());
}
bool Object::IsNativeContext() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->native_context_map();
}
bool Object::IsScopeInfo() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->scope_info_map();
}
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
template <> inline bool Is<JSFunction>(Object* obj) {
return obj->IsJSFunction();
}
TYPE_CHECKER(Code, CODE_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSDate, JS_DATE_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
bool Object::IsStringWrapper() {
return IsJSValue() && JSValue::cast(this)->value()->IsString();
}
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
bool Object::IsBoolean() {
return IsOddball() &&
((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
}
TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
bool Object::IsJSArrayBufferView() {
return IsJSDataView() || IsJSTypedArray();
}
TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
template <> inline bool Is<JSArray>(Object* obj) {
return obj->IsJSArray();
}
bool Object::IsHashTable() {
return Object::IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->hash_table_map();
}
bool Object::IsDictionary() {
return IsHashTable() &&
this != HeapObject::cast(this)->GetHeap()->string_table();
}
bool Object::IsStringTable() {
return IsHashTable();
}
bool Object::IsJSFunctionResultCache() {
if (!IsFixedArray()) return false;
FixedArray* self = FixedArray::cast(this);
int length = self->length();
if (length < JSFunctionResultCache::kEntriesIndex) return false;
if ((length - JSFunctionResultCache::kEntriesIndex)
% JSFunctionResultCache::kEntrySize != 0) {
return false;
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
reinterpret_cast<JSFunctionResultCache*>(this)->
JSFunctionResultCacheVerify();
}
#endif
return true;
}
bool Object::IsNormalizedMapCache() {
return NormalizedMapCache::IsNormalizedMapCache(this);
}
int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
bool NormalizedMapCache::IsNormalizedMapCache(Object* obj) {
if (!obj->IsFixedArray()) return false;
if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
return false;
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
reinterpret_cast<NormalizedMapCache*>(obj)->NormalizedMapCacheVerify();
}
#endif
return true;
}
bool Object::IsCompilationCacheTable() {
return IsHashTable();
}
bool Object::IsCodeCacheHashTable() {
return IsHashTable();
}
bool Object::IsPolymorphicCodeCacheHashTable() {
return IsHashTable();
}
bool Object::IsMapCache() {
return IsHashTable();
}
bool Object::IsObjectHashTable() {
return IsHashTable();
}
bool Object::IsOrderedHashTable() {
return IsHeapObject() &&
HeapObject::cast(this)->map() ==
HeapObject::cast(this)->GetHeap()->ordered_hash_table_map();
}
bool Object::IsPrimitive() {
return IsOddball() || IsNumber() || IsString();
}
bool Object::IsJSGlobalProxy() {
bool result = IsHeapObject() &&
(HeapObject::cast(this)->map()->instance_type() ==
JS_GLOBAL_PROXY_TYPE);
ASSERT(!result ||
HeapObject::cast(this)->map()->is_access_check_needed());
return result;
}
bool Object::IsGlobalObject() {
if (!IsHeapObject()) return false;
InstanceType type = HeapObject::cast(this)->map()->instance_type();
return type == JS_GLOBAL_OBJECT_TYPE ||
type == JS_BUILTINS_OBJECT_TYPE;
}
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
bool Object::IsUndetectableObject() {
return IsHeapObject()
&& HeapObject::cast(this)->map()->is_undetectable();
}
bool Object::IsAccessCheckNeeded() {
if (!IsHeapObject()) return false;
if (IsJSGlobalProxy()) {
JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
GlobalObject* global =
proxy->GetIsolate()->context()->global_object();
return proxy->IsDetachedFrom(global);
}
return HeapObject::cast(this)->map()->is_access_check_needed();
}
bool Object::IsStruct() {
if (!IsHeapObject()) return false;
switch (HeapObject::cast(this)->map()->instance_type()) {
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
default: return false;
}
}
#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
bool Object::Is##Name() { \
return Object::IsHeapObject() \
&& HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
}
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
bool Object::IsUndefined() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
}
bool Object::IsNull() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
}
bool Object::IsTheHole() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
}
bool Object::IsException() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kException;
}
bool Object::IsUninitialized() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
}
bool Object::IsTrue() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
}
bool Object::IsFalse() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
}
bool Object::IsArgumentsMarker() {
return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
}
double Object::Number() {
ASSERT(IsNumber());
return IsSmi()
? static_cast<double>(reinterpret_cast<Smi*>(this)->value())
: reinterpret_cast<HeapNumber*>(this)->value();
}
bool Object::IsNaN() {
return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
}
MaybeHandle<Smi> Object::ToSmi(Isolate* isolate, Handle<Object> object) {
if (object->IsSmi()) return Handle<Smi>::cast(object);
if (object->IsHeapNumber()) {
double value = Handle<HeapNumber>::cast(object)->value();
int int_value = FastD2I(value);
if (value == FastI2D(int_value) && Smi::IsValid(int_value)) {
return handle(Smi::FromInt(int_value), isolate);
}
}
return Handle<Smi>();
}
MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
Handle<Object> object) {
return ToObject(
isolate, object, handle(isolate->context()->native_context(), isolate));
}
bool Object::HasSpecificClassOf(String* name) {
return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
}
MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
Handle<Name> name) {
LookupIterator it(object, name);
return GetProperty(&it);
}
MaybeHandle<Object> Object::GetElement(Isolate* isolate,
Handle<Object> object,
uint32_t index) {
// GetElement can trigger a getter which can cause allocation.
// This was not always the case. This ASSERT is here to catch
// leftover incorrect uses.
ASSERT(AllowHeapAllocation::IsAllowed());
return Object::GetElementWithReceiver(isolate, object, object, index);
}
MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
Handle<Name> name) {
uint32_t index;
Isolate* isolate = name->GetIsolate();
if (name->AsArrayIndex(&index)) return GetElement(isolate, object, index);
return GetProperty(object, name);
}
MaybeHandle<Object> Object::GetProperty(Isolate* isolate,
Handle<Object> object,
const char* name) {
Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
ASSERT(!str.is_null());
#ifdef DEBUG
uint32_t index; // Assert that the name is not an array index.
ASSERT(!str->AsArrayIndex(&index));
#endif // DEBUG
return GetProperty(object, str);
}
MaybeHandle<Object> JSProxy::GetElementWithHandler(Handle<JSProxy> proxy,
Handle<Object> receiver,
uint32_t index) {
return GetPropertyWithHandler(
proxy, receiver, proxy->GetIsolate()->factory()->Uint32ToString(index));
}
MaybeHandle<Object> JSProxy::SetElementWithHandler(Handle<JSProxy> proxy,
Handle<JSReceiver> receiver,
uint32_t index,
Handle<Object> value,
StrictMode strict_mode) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
return SetPropertyWithHandler(
proxy, receiver, name, value, NONE, strict_mode);
}
bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
Isolate* isolate = proxy->GetIsolate();
Handle<String> name = isolate->factory()->Uint32ToString(index);
return HasPropertyWithHandler(proxy, name);
}
#define FIELD_ADDR(p, offset) \
(reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
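// Note: p is a tagged HeapObject pointer, so the object's real start address
// is p - kHeapObjectTag (the low bit is set on heap object pointers). A small
// sketch of the arithmetic, assuming an object at raw address 0x1000:
//   tagged pointer p            == 0x1001
//   FIELD_ADDR(p, kMapOffset)   == 0x1001 + 0 - 1 == 0x1000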
#define READ_FIELD(p, offset) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
#define ACQUIRE_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::Acquire_Load( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))
#define NOBARRIER_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::NoBarrier_Load( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
#define RELEASE_WRITE_FIELD(p, offset, value) \
base::Release_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
#define NOBARRIER_WRITE_FIELD(p, offset, value) \
base::NoBarrier_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
#define WRITE_BARRIER(heap, object, offset, value) \
  heap->incremental_marking()->RecordWrite( \
      object, HeapObject::RawField(object, offset), value); \
  if (heap->InNewSpace(value)) { \
    heap->RecordWrite(object->address(), offset); \
  }
#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
  if (mode == UPDATE_WRITE_BARRIER) { \
    heap->incremental_marking()->RecordWrite( \
        object, HeapObject::RawField(object, offset), value); \
    if (heap->InNewSpace(value)) { \
      heap->RecordWrite(object->address(), offset); \
    } \
  }
#ifndef V8_TARGET_ARCH_MIPS
#define READ_DOUBLE_FIELD(p, offset) \
(*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
#else // V8_TARGET_ARCH_MIPS
// Prevent gcc from using load-double (mips ldc1) on (possibly)
// non-64-bit aligned HeapNumber::value.
static inline double read_double_field(void* p, int offset) {
union conversion {
double d;
uint32_t u[2];
} c;
c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
return c.d;
}
#define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
#endif // V8_TARGET_ARCH_MIPS
#ifndef V8_TARGET_ARCH_MIPS
#define WRITE_DOUBLE_FIELD(p, offset, value) \
(*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
#else // V8_TARGET_ARCH_MIPS
// Prevent gcc from using store-double (mips sdc1) on (possibly)
// non-64-bit aligned HeapNumber::value.
static inline void write_double_field(void* p, int offset,
double value) {
union conversion {
double d;
uint32_t u[2];
} c;
c.d = value;
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0];
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1];
}
#define WRITE_DOUBLE_FIELD(p, offset, value) \
write_double_field(p, offset, value)
#endif // V8_TARGET_ARCH_MIPS
#define READ_INT_FIELD(p, offset) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
#define WRITE_INT_FIELD(p, offset, value) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
#define READ_INTPTR_FIELD(p, offset) \
(*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))
#define WRITE_INTPTR_FIELD(p, offset, value) \
(*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_UINT32_FIELD(p, offset) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
#define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT32_FIELD(p, offset) \
(*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)))
#define WRITE_INT32_FIELD(p, offset, value) \
(*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT64_FIELD(p, offset) \
(*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
#define WRITE_INT64_FIELD(p, offset, value) \
(*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_SHORT_FIELD(p, offset) \
(*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
#define WRITE_SHORT_FIELD(p, offset, value) \
(*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_BYTE_FIELD(p, offset) \
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))
#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
static_cast<byte>(base::NoBarrier_Load( \
reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset))))
#define WRITE_BYTE_FIELD(p, offset, value) \
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \
base::NoBarrier_Store( \
reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
return &READ_FIELD(obj, byte_offset);
}
int Smi::value() {
return Internals::SmiValue(this);
}
Smi* Smi::FromInt(int value) {
ASSERT(Smi::IsValid(value));
return reinterpret_cast<Smi*>(Internals::IntToSmi(value));
}
Smi* Smi::FromIntptr(intptr_t value) {
ASSERT(Smi::IsValid(value));
int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
}
bool Smi::IsValid(intptr_t value) {
bool result = Internals::IsValidSmi(value);
ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue);
return result;
}
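// Sketch of the Smi encoding assumed by these helpers, for a 32-bit target
// where kSmiTagSize == 1 and kSmiShiftSize == 0 (64-bit targets use a shift
// of 31 instead):
//   Smi::FromInt(5)          -> bit pattern (5 << 1) | kSmiTag == 0xA
//   Smi::cast(p)->value()    -> 0xA >> 1 == 5
// HAS_SMI_TAG simply checks that the low tag bit is 0.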
MapWord MapWord::FromMap(Map* map) {
return MapWord(reinterpret_cast<uintptr_t>(map));
}
Map* MapWord::ToMap() {
return reinterpret_cast<Map*>(value_);
}
bool MapWord::IsForwardingAddress() {
return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
}
MapWord MapWord::FromForwardingAddress(HeapObject* object) {
Address raw = reinterpret_cast<Address>(object) - kHeapObjectTag;
return MapWord(reinterpret_cast<uintptr_t>(raw));
}
HeapObject* MapWord::ToForwardingAddress() {
ASSERT(IsForwardingAddress());
return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
}
#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
}
void HeapObject::VerifySmiField(int offset) {
CHECK(READ_FIELD(this, offset)->IsSmi());
}
#endif
Heap* HeapObject::GetHeap() {
Heap* heap =
MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
SLOW_ASSERT(heap != NULL);
return heap;
}
Isolate* HeapObject::GetIsolate() {
return GetHeap()->isolate();
}
Map* HeapObject::map() {
#ifdef DEBUG
// Clear mark potentially added by PathTracer.
uintptr_t raw_value =
map_word().ToRawValue() & ~static_cast<uintptr_t>(PathTracer::kMarkTag);
return MapWord::FromRawValue(raw_value).ToMap();
#else
return map_word().ToMap();
#endif
}
void HeapObject::set_map(Map* value) {
set_map_word(MapWord::FromMap(value));
if (value != NULL) {
// TODO(1600) We are passing NULL as a slot because maps can never be on an
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
}
}
Map* HeapObject::synchronized_map() {
return synchronized_map_word().ToMap();
}
void HeapObject::synchronized_set_map(Map* value) {
synchronized_set_map_word(MapWord::FromMap(value));
if (value != NULL) {
// TODO(1600) We are passing NULL as a slot because maps can never be on an
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
}
}
void HeapObject::synchronized_set_map_no_write_barrier(Map* value) {
synchronized_set_map_word(MapWord::FromMap(value));
}
// Unsafe accessor omitting write barrier.
void HeapObject::set_map_no_write_barrier(Map* value) {
set_map_word(MapWord::FromMap(value));
}
MapWord HeapObject::map_word() {
return MapWord(
reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
}
void HeapObject::set_map_word(MapWord map_word) {
NOBARRIER_WRITE_FIELD(
this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
MapWord HeapObject::synchronized_map_word() {
return MapWord(
reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset)));
}
void HeapObject::synchronized_set_map_word(MapWord map_word) {
RELEASE_WRITE_FIELD(
this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
HeapObject* HeapObject::FromAddress(Address address) {
ASSERT_TAG_ALIGNED(address);
return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
}
Address HeapObject::address() {
return reinterpret_cast<Address>(this) - kHeapObjectTag;
}
int HeapObject::Size() {
return SizeFromMap(map());
}
void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
}
void HeapObject::IteratePointer(ObjectVisitor* v, int offset) {
v->VisitPointer(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
}
void HeapObject::IterateNextCodeLink(ObjectVisitor* v, int offset) {
v->VisitNextCodeLink(reinterpret_cast<Object**>(FIELD_ADDR(this, offset)));
}
double HeapNumber::value() {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
void HeapNumber::set_value(double value) {
WRITE_DOUBLE_FIELD(this, kValueOffset, value);
}
int HeapNumber::get_exponent() {
return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
kExponentShift) - kExponentBias;
}
int HeapNumber::get_sign() {
return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
Object** FixedArray::GetFirstElementAddress() {
return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
}
bool FixedArray::ContainsOnlySmisOrHoles() {
Object* the_hole = GetHeap()->the_hole_value();
Object** current = GetFirstElementAddress();
for (int i = 0; i < length(); ++i) {
Object* candidate = *current++;
if (!candidate->IsSmi() && candidate != the_hole) return false;
}
return true;
}
FixedArrayBase* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
return static_cast<FixedArrayBase*>(array);
}
void JSObject::ValidateElements(Handle<JSObject> object) {
#ifdef ENABLE_SLOW_ASSERTS
if (FLAG_enable_slow_asserts) {
ElementsAccessor* accessor = object->GetElementsAccessor();
accessor->Validate(object);
}
#endif
}
void AllocationSite::Initialize() {
set_transition_info(Smi::FromInt(0));
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::FromInt(0));
set_pretenure_data(Smi::FromInt(0));
set_pretenure_create_count(Smi::FromInt(0));
set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
SKIP_WRITE_BARRIER);
}
void AllocationSite::MarkZombie() {
ASSERT(!IsZombie());
Initialize();
set_pretenure_decision(kZombie);
}
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
ElementsKind boilerplate_elements_kind) {
if (FLAG_pretenuring_call_new ||
IsFastSmiElementsKind(boilerplate_elements_kind)) {
return TRACK_ALLOCATION_SITE;
}
return DONT_TRACK_ALLOCATION_SITE;
}
AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
ElementsKind to) {
if (FLAG_pretenuring_call_new ||
(IsFastSmiElementsKind(from) &&
IsMoreGeneralElementsKindTransition(from, to))) {
return TRACK_ALLOCATION_SITE;
}
return DONT_TRACK_ALLOCATION_SITE;
}
inline bool AllocationSite::CanTrack(InstanceType type) {
if (FLAG_allocation_site_pretenuring) {
return type == JS_ARRAY_TYPE ||
type == JS_OBJECT_TYPE ||
type < FIRST_NONSTRING_TYPE;
}
return type == JS_ARRAY_TYPE;
}
inline DependentCode::DependencyGroup AllocationSite::ToDependencyGroup(
Reason reason) {
switch (reason) {
case TENURING:
return DependentCode::kAllocationSiteTenuringChangedGroup;
break;
case TRANSITIONS:
return DependentCode::kAllocationSiteTransitionChangedGroup;
break;
}
UNREACHABLE();
return DependentCode::kAllocationSiteTransitionChangedGroup;
}
inline void AllocationSite::set_memento_found_count(int count) {
int value = pretenure_data()->value();
// Verify that we can count more mementos than we can possibly find in one
// new space collection.
ASSERT((GetHeap()->MaxSemiSpaceSize() /
(StaticVisitorBase::kMinObjectSizeInWords * kPointerSize +
AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
ASSERT(count < MementoFoundCountBits::kMax);
set_pretenure_data(
Smi::FromInt(MementoFoundCountBits::update(value, count)),
SKIP_WRITE_BARRIER);
}
inline bool AllocationSite::IncrementMementoFoundCount() {
if (IsZombie()) return false;
int value = memento_found_count();
set_memento_found_count(value + 1);
return memento_found_count() == kPretenureMinimumCreated;
}
inline void AllocationSite::IncrementMementoCreateCount() {
ASSERT(FLAG_allocation_site_pretenuring);
int value = memento_create_count();
set_memento_create_count(value + 1);
}
inline bool AllocationSite::MakePretenureDecision(
PretenureDecision current_decision,
double ratio,
bool maximum_size_scavenge) {
// Here we just allow state transitions from undecided or maybe tenure
// to don't tenure, maybe tenure, or tenure.
if ((current_decision == kUndecided || current_decision == kMaybeTenure)) {
if (ratio >= kPretenureRatio) {
// We only transition into the tenure state when the semi-space was at
// maximum capacity.
if (maximum_size_scavenge) {
set_deopt_dependent_code(true);
set_pretenure_decision(kTenure);
// Currently we just need to deopt when we make a state transition to
// tenure.
return true;
}
set_pretenure_decision(kMaybeTenure);
} else {
set_pretenure_decision(kDontTenure);
}
}
return false;
}
inline bool AllocationSite::DigestPretenuringFeedback(
bool maximum_size_scavenge) {
bool deopt = false;
int create_count = memento_create_count();
int found_count = memento_found_count();
bool minimum_mementos_created = create_count >= kPretenureMinimumCreated;
double ratio =
minimum_mementos_created || FLAG_trace_pretenuring_statistics ?
static_cast<double>(found_count) / create_count : 0.0;
PretenureDecision current_decision = pretenure_decision();
if (minimum_mementos_created) {
deopt = MakePretenureDecision(
current_decision, ratio, maximum_size_scavenge);
}
if (FLAG_trace_pretenuring_statistics) {
PrintF(
"AllocationSite(%p): (created, found, ratio) (%d, %d, %f) %s => %s\n",
static_cast<void*>(this), create_count, found_count, ratio,
PretenureDecisionName(current_decision),
PretenureDecisionName(pretenure_decision()));
}
// Clear feedback calculation fields until the next gc.
set_memento_found_count(0);
set_memento_create_count(0);
return deopt;
}
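// Worked example (numbers are illustrative only): if a scavenge saw
// memento_create_count() == 100 and memento_found_count() == 90, the ratio is
// 0.9; whether that is enough to switch the decision to kTenure depends on
// kPretenureRatio and on whether the scavenge ran with the semi-space at
// maximum size (see MakePretenureDecision above).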
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
JSObject::ValidateElements(object);
ElementsKind elements_kind = object->map()->elements_kind();
if (!IsFastObjectElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
TransitionElementsKind(object, FAST_HOLEY_ELEMENTS);
} else {
TransitionElementsKind(object, FAST_ELEMENTS);
}
}
}
void JSObject::EnsureCanContainElements(Handle<JSObject> object,
Object** objects,
uint32_t count,
EnsureElementsMode mode) {
ElementsKind current_kind = object->map()->elements_kind();
ElementsKind target_kind = current_kind;
{
DisallowHeapAllocation no_allocation;
ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
bool is_holey = IsFastHoleyElementsKind(current_kind);
if (current_kind == FAST_HOLEY_ELEMENTS) return;
Heap* heap = object->GetHeap();
Object* the_hole = heap->the_hole_value();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
if (current == the_hole) {
is_holey = true;
target_kind = GetHoleyElementsKind(target_kind);
} else if (!current->IsSmi()) {
if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
if (IsFastSmiElementsKind(target_kind)) {
if (is_holey) {
target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
} else {
target_kind = FAST_DOUBLE_ELEMENTS;
}
}
} else if (is_holey) {
target_kind = FAST_HOLEY_ELEMENTS;
break;
} else {
target_kind = FAST_ELEMENTS;
}
}
}
}
if (target_kind != current_kind) {
TransitionElementsKind(object, target_kind);
}
}
void JSObject::EnsureCanContainElements(Handle<JSObject> object,
Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode) {
Heap* heap = object->GetHeap();
if (elements->map() != heap->fixed_double_array_map()) {
ASSERT(elements->map() == heap->fixed_array_map() ||
elements->map() == heap->fixed_cow_array_map());
if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
Object** objects =
Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
EnsureCanContainElements(object, objects, length, mode);
return;
}
ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
} else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
Handle<FixedDoubleArray> double_array =
Handle<FixedDoubleArray>::cast(elements);
for (uint32_t i = 0; i < length; ++i) {
if (double_array->is_the_hole(i)) {
TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
return;
}
}
TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
}
}
void JSObject::SetMapAndElements(Handle<JSObject> object,
Handle<Map> new_map,
Handle<FixedArrayBase> value) {
JSObject::MigrateToMap(object, new_map);
ASSERT((object->map()->has_fast_smi_or_object_elements() ||
(*value == object->GetHeap()->empty_fixed_array())) ==
(value->map() == object->GetHeap()->fixed_array_map() ||
value->map() == object->GetHeap()->fixed_cow_array_map()));
ASSERT((*value == object->GetHeap()->empty_fixed_array()) ||
(object->map()->has_fast_double_elements() ==
value->IsFixedDoubleArray()));
object->set_elements(*value);
}
void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kElementsOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
}
void JSObject::initialize_properties() {
ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
}
void JSObject::initialize_elements() {
FixedArrayBase* elements = map()->GetInitialElements();
WRITE_FIELD(this, kElementsOffset, elements);
}
Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
DisallowHeapAllocation no_gc;
if (!map->HasTransitionArray()) return Handle<String>::null();
TransitionArray* transitions = map->transitions();
if (!transitions->IsSimpleTransition()) return Handle<String>::null();
int transition = TransitionArray::kSimpleTransitionIndex;
PropertyDetails details = transitions->GetTargetDetails(transition);
Name* name = transitions->GetKey(transition);
if (details.type() != FIELD) return Handle<String>::null();
if (details.attributes() != NONE) return Handle<String>::null();
if (!name->IsString()) return Handle<String>::null();
return Handle<String>(String::cast(name));
}
Handle<Map> JSObject::ExpectedTransitionTarget(Handle<Map> map) {
ASSERT(!ExpectedTransitionKey(map).is_null());
return Handle<Map>(map->transitions()->GetTarget(
TransitionArray::kSimpleTransitionIndex));
}
Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
DisallowHeapAllocation no_allocation;
if (!map->HasTransitionArray()) return Handle<Map>::null();
TransitionArray* transitions = map->transitions();
int transition = transitions->Search(*key);
if (transition == TransitionArray::kNotFound) return Handle<Map>::null();
PropertyDetails target_details = transitions->GetTargetDetails(transition);
if (target_details.type() != FIELD) return Handle<Map>::null();
if (target_details.attributes() != NONE) return Handle<Map>::null();
return Handle<Map>(transitions->GetTarget(transition));
}
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
byte Oddball::kind() {
return Smi::cast(READ_FIELD(this, kKindOffset))->value();
}
void Oddball::set_kind(byte value) {
WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
}
Object* Cell::value() {
return READ_FIELD(this, kValueOffset);
}
void Cell::set_value(Object* val, WriteBarrierMode ignored) {
// The write barrier is not used for global property cells.
ASSERT(!val->IsPropertyCell() && !val->IsCell());
WRITE_FIELD(this, kValueOffset, val);
}
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
Object* PropertyCell::type_raw() {
return READ_FIELD(this, kTypeOffset);
}
void PropertyCell::set_type_raw(Object* val, WriteBarrierMode ignored) {
WRITE_FIELD(this, kTypeOffset, val);
}
int JSObject::GetHeaderSize() {
InstanceType type = map()->instance_type();
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
switch (type) {
case JS_GENERATOR_OBJECT_TYPE:
return JSGeneratorObject::kSize;
case JS_MODULE_TYPE:
return JSModule::kSize;
case JS_GLOBAL_PROXY_TYPE:
return JSGlobalProxy::kSize;
case JS_GLOBAL_OBJECT_TYPE:
return JSGlobalObject::kSize;
case JS_BUILTINS_OBJECT_TYPE:
return JSBuiltinsObject::kSize;
case JS_FUNCTION_TYPE:
return JSFunction::kSize;
case JS_VALUE_TYPE:
return JSValue::kSize;
case JS_DATE_TYPE:
return JSDate::kSize;
case JS_ARRAY_TYPE:
return JSArray::kSize;
case JS_ARRAY_BUFFER_TYPE:
return JSArrayBuffer::kSize;
case JS_TYPED_ARRAY_TYPE:
return JSTypedArray::kSize;
case JS_DATA_VIEW_TYPE:
return JSDataView::kSize;
case JS_SET_TYPE:
return JSSet::kSize;
case JS_MAP_TYPE:
return JSMap::kSize;
case JS_SET_ITERATOR_TYPE:
return JSSetIterator::kSize;
case JS_MAP_ITERATOR_TYPE:
return JSMapIterator::kSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
return JSWeakSet::kSize;
case JS_REGEXP_TYPE:
return JSRegExp::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
return JSObject::kHeaderSize;
case JS_MESSAGE_OBJECT_TYPE:
return JSMessageObject::kSize;
default:
// TODO(jkummerow): Re-enable this. Blink currently hits this
// from its CustomElementConstructorBuilder.
// UNREACHABLE();
return 0;
}
}
int JSObject::GetInternalFieldCount() {
ASSERT(1 << kPointerSizeLog2 == kPointerSize);
// Make sure to adjust for the number of in-object properties. These
// properties do contribute to the size, but are not internal fields.
return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
map()->inobject_properties();
}
int JSObject::GetInternalFieldOffset(int index) {
ASSERT(index < GetInternalFieldCount() && index >= 0);
return GetHeaderSize() + (kPointerSize * index);
}
Object* JSObject::GetInternalField(int index) {
ASSERT(index < GetInternalFieldCount() && index >= 0);
// Internal fields follow immediately after the header, whereas in-object
// properties are at the end of the object. Therefore there is no need
// to adjust the index here.
return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
}
void JSObject::SetInternalField(int index, Object* value) {
ASSERT(index < GetInternalFieldCount() && index >= 0);
// Internal fields follow immediately after the header, whereas in-object
// properties are at the end of the object. Therefore there is no need
// to adjust the index here.
int offset = GetHeaderSize() + (kPointerSize * index);
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
}
void JSObject::SetInternalField(int index, Smi* value) {
ASSERT(index < GetInternalFieldCount() && index >= 0);
// Internal fields follow immediately after the header, whereas in-object
// properties are at the end of the object. Therefore there is no need
// to adjust the index here.
int offset = GetHeaderSize() + (kPointerSize * index);
WRITE_FIELD(this, offset, value);
}
// Access fast-case object properties at index. The use of these routines
// is needed to correctly distinguish between properties stored in-object and
// properties stored in the properties array.
Object* JSObject::RawFastPropertyAt(FieldIndex index) {
if (index.is_inobject()) {
return READ_FIELD(this, index.offset());
} else {
return properties()->get(index.outobject_array_index());
}
}
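// For example (illustrative): with a map that has two in-object property
// slots, the first two fast properties are read straight out of the object
// with READ_FIELD, while a third one spills into the properties() backing
// store and is fetched via its out-of-object array index.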
void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
if (index.is_inobject()) {
int offset = index.offset();
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
} else {
properties()->set(index.outobject_array_index(), value);
}
}
int JSObject::GetInObjectPropertyOffset(int index) {
return map()->GetInObjectPropertyOffset(index);
}
Object* JSObject::InObjectPropertyAt(int index) {
int offset = GetInObjectPropertyOffset(index);
return READ_FIELD(this, offset);
}
Object* JSObject::InObjectPropertyAtPut(int index,
Object* value,
WriteBarrierMode mode) {
// Adjust for the number of properties stored in the object.
int offset = GetInObjectPropertyOffset(index);
WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
return value;
}
void JSObject::InitializeBody(Map* map,
Object* pre_allocated_value,
Object* filler_value) {
ASSERT(!filler_value->IsHeapObject() ||
!GetHeap()->InNewSpace(filler_value));
ASSERT(!pre_allocated_value->IsHeapObject() ||
!GetHeap()->InNewSpace(pre_allocated_value));
int size = map->instance_size();
int offset = kHeaderSize;
if (filler_value != pre_allocated_value) {
int pre_allocated = map->pre_allocated_property_fields();
ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
for (int i = 0; i < pre_allocated; i++) {
WRITE_FIELD(this, offset, pre_allocated_value);
offset += kPointerSize;
}
}
while (offset < size) {
WRITE_FIELD(this, offset, filler_value);
offset += kPointerSize;
}
}
bool JSObject::HasFastProperties() {
ASSERT(properties()->IsDictionary() == map()->is_dictionary_map());
return !properties()->IsDictionary();
}
bool JSObject::TooManyFastProperties(StoreFromKeyed store_mode) {
// Allow extra fast properties if the object has more than
// kFastPropertiesSoftLimit in-object properties. When this is the case, it is
// very unlikely that the object is being used as a dictionary and there is a
// good chance that allowing more map transitions will be worth it.
Map* map = this->map();
if (map->unused_property_fields() != 0) return false;
int inobject = map->inobject_properties();
int limit;
if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) {
limit = Max(inobject, kMaxFastProperties);
} else {
limit = Max(inobject, kFastPropertiesSoftLimit);
}
return properties()->length() > limit;
}
void Struct::InitializeBody(int object_size) {
Object* value = GetHeap()->undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, value);
}
}
bool Object::ToArrayIndex(uint32_t* index) {
if (IsSmi()) {
int value = Smi::cast(this)->value();
if (value < 0) return false;
*index = value;
return true;
}
if (IsHeapNumber()) {
double value = HeapNumber::cast(this)->value();
uint32_t uint_value = static_cast<uint32_t>(value);
if (value == static_cast<double>(uint_value)) {
*index = uint_value;
return true;
}
}
return false;
}
bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
if (!this->IsJSValue()) return false;
JSValue* js_value = JSValue::cast(this);
if (!js_value->value()->IsString()) return false;
String* str = String::cast(js_value->value());
if (index >= static_cast<uint32_t>(str->length())) return false;
return true;
}
void Object::VerifyApiCallResultType() {
#if ENABLE_EXTRA_CHECKS
if (!(IsSmi() ||
IsString() ||
IsSymbol() ||
IsSpecObject() ||
IsHeapNumber() ||
IsUndefined() ||
IsTrue() ||
IsFalse() ||
IsNull())) {
FATAL("API call returned invalid object");
}
#endif // ENABLE_EXTRA_CHECKS
}
FixedArrayBase* FixedArrayBase::cast(Object* object) {
ASSERT(object->IsFixedArrayBase());
return reinterpret_cast<FixedArrayBase*>(object);
}
Object* FixedArray::get(int index) {
SLOW_ASSERT(index >= 0 && index < this->length());
return READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
Handle<Object> FixedArray::get(Handle<FixedArray> array, int index) {
return handle(array->get(index), array->GetIsolate());
}
bool FixedArray::is_the_hole(int index) {
return get(index) == GetHeap()->the_hole_value();
}
void FixedArray::set(int index, Smi* value) {
ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
}
void FixedArray::set(int index, Object* value) {
ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
}
inline bool FixedDoubleArray::is_the_hole_nan(double value) {
return BitCast<uint64_t, double>(value) == kHoleNanInt64;
}
inline double FixedDoubleArray::hole_nan_as_double() {
return BitCast<double, uint64_t>(kHoleNanInt64);
}
inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
ASSERT(BitCast<uint64_t>(OS::nan_value()) != kHoleNanInt64);
ASSERT((BitCast<uint64_t>(OS::nan_value()) >> 32) != kHoleNanUpper32);
return OS::nan_value();
}
double FixedDoubleArray::get_scalar(int index) {
ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
ASSERT(index >= 0 && index < this->length());
double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
ASSERT(!is_the_hole_nan(result));
return result;
}
int64_t FixedDoubleArray::get_representation(int index) {
ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
ASSERT(index >= 0 && index < this->length());
return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
}
Handle<Object> FixedDoubleArray::get(Handle<FixedDoubleArray> array,
int index) {
if (array->is_the_hole(index)) {
return array->GetIsolate()->factory()->the_hole_value();
} else {
return array->GetIsolate()->factory()->NewNumber(array->get_scalar(index));
}
}
void FixedDoubleArray::set(int index, double value) {
ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double();
WRITE_DOUBLE_FIELD(this, offset, value);
}
void FixedDoubleArray::set_the_hole(int index) {
ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
}
bool FixedDoubleArray::is_the_hole(int index) {
int offset = kHeaderSize + index * kDoubleSize;
return is_the_hole_nan(READ_DOUBLE_FIELD(this, offset));
}
double* FixedDoubleArray::data_start() {
return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
}
void FixedDoubleArray::FillWithHoles(int from, int to) {
for (int i = from; i < to; i++) {
set_the_hole(i);
}
}
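// The ConstantPoolArray accessors below assume entries are grouped by type
// in the order FIRST_TYPE..LAST_TYPE (INT64, CODE_PTR, HEAP_PTR, INT32, as
// the count fields used in number_of_entries() suggest). A small section is
// always present and packs its per-type counts into the two 32-bit layout
// words at kSmallLayout1Offset and kSmallLayout2Offset; an optional extended
// section keeps its counts in a header starting at the 8-byte aligned offset
// returned by get_extended_section_header_offset(). first_index(),
// last_index() and number_of_entries() derive entry ranges from these
// counts. For example, a small section with 2 INT64, 1 CODE_PTR, 3 HEAP_PTR
// and 4 INT32 entries has first_index(HEAP_PTR, SMALL_SECTION) == 3 and
// last_index(HEAP_PTR, SMALL_SECTION) == 5.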
bool ConstantPoolArray::is_extended_layout() {
uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
return IsExtendedField::decode(small_layout_1);
}
ConstantPoolArray::LayoutSection ConstantPoolArray::final_section() {
return is_extended_layout() ? EXTENDED_SECTION : SMALL_SECTION;
}
int ConstantPoolArray::first_extended_section_index() {
ASSERT(is_extended_layout());
uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
return TotalCountField::decode(small_layout_2);
}
int ConstantPoolArray::get_extended_section_header_offset() {
return RoundUp(SizeFor(NumberOfEntries(this, SMALL_SECTION)), kInt64Size);
}
ConstantPoolArray::WeakObjectState ConstantPoolArray::get_weak_object_state() {
uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
return WeakObjectStateField::decode(small_layout_2);
}
void ConstantPoolArray::set_weak_object_state(
ConstantPoolArray::WeakObjectState state) {
uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
small_layout_2 = WeakObjectStateField::update(small_layout_2, state);
  WRITE_UINT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
}
int ConstantPoolArray::first_index(Type type, LayoutSection section) {
int index = 0;
if (section == EXTENDED_SECTION) {
ASSERT(is_extended_layout());
index += first_extended_section_index();
}
for (Type type_iter = FIRST_TYPE; type_iter < type;
type_iter = next_type(type_iter)) {
index += number_of_entries(type_iter, section);
}
return index;
}
int ConstantPoolArray::last_index(Type type, LayoutSection section) {
return first_index(type, section) + number_of_entries(type, section) - 1;
}
int ConstantPoolArray::number_of_entries(Type type, LayoutSection section) {
if (section == SMALL_SECTION) {
uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
switch (type) {
case INT64:
return Int64CountField::decode(small_layout_1);
case CODE_PTR:
return CodePtrCountField::decode(small_layout_1);
case HEAP_PTR:
return HeapPtrCountField::decode(small_layout_1);
case INT32:
return Int32CountField::decode(small_layout_2);
default:
UNREACHABLE();
return 0;
}
} else {
ASSERT(section == EXTENDED_SECTION && is_extended_layout());
int offset = get_extended_section_header_offset();
switch (type) {
case INT64:
offset += kExtendedInt64CountOffset;
break;
case CODE_PTR:
offset += kExtendedCodePtrCountOffset;
break;
case HEAP_PTR:
offset += kExtendedHeapPtrCountOffset;
break;
case INT32:
offset += kExtendedInt32CountOffset;
break;
default:
UNREACHABLE();
}
return READ_INT_FIELD(this, offset);
}
}
ConstantPoolArray::Type ConstantPoolArray::get_type(int index) {
LayoutSection section;
if (is_extended_layout() && index >= first_extended_section_index()) {
section = EXTENDED_SECTION;
} else {
section = SMALL_SECTION;
}
Type type = FIRST_TYPE;
while (index > last_index(type, section)) {
type = next_type(type);
}
ASSERT(type <= LAST_TYPE);
return type;
}
int64_t ConstantPoolArray::get_int64_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == INT64);
return READ_INT64_FIELD(this, OffsetOfElementAt(index));
}
double ConstantPoolArray::get_int64_entry_as_double(int index) {
STATIC_ASSERT(kDoubleSize == kInt64Size);
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == INT64);
return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
}
Address ConstantPoolArray::get_code_ptr_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == CODE_PTR);
return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index)));
}
Object* ConstantPoolArray::get_heap_ptr_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == HEAP_PTR);
return READ_FIELD(this, OffsetOfElementAt(index));
}
int32_t ConstantPoolArray::get_int32_entry(int index) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == INT32);
return READ_INT32_FIELD(this, OffsetOfElementAt(index));
}
void ConstantPoolArray::set(int index, int64_t value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == INT64);
WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
}
void ConstantPoolArray::set(int index, double value) {
STATIC_ASSERT(kDoubleSize == kInt64Size);
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == INT64);
WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
}
void ConstantPoolArray::set(int index, Address value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == CODE_PTR);
WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value));
}
void ConstantPoolArray::set(int index, Object* value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == HEAP_PTR);
WRITE_FIELD(this, OffsetOfElementAt(index), value);
WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
}
void ConstantPoolArray::set(int index, int32_t value) {
ASSERT(map() == GetHeap()->constant_pool_array_map());
ASSERT(get_type(index) == INT32);
WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
}
void ConstantPoolArray::Init(const NumberOfEntries& small) {
uint32_t small_layout_1 =
Int64CountField::encode(small.count_of(INT64)) |
CodePtrCountField::encode(small.count_of(CODE_PTR)) |
HeapPtrCountField::encode(small.count_of(HEAP_PTR)) |
IsExtendedField::encode(false);
uint32_t small_layout_2 =
Int32CountField::encode(small.count_of(INT32)) |
TotalCountField::encode(small.total_count()) |
WeakObjectStateField::encode(NO_WEAK_OBJECTS);
WRITE_UINT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
WRITE_UINT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
if (kHeaderSize != kFirstEntryOffset) {
ASSERT(kFirstEntryOffset - kHeaderSize == kInt32Size);
WRITE_UINT32_FIELD(this, kHeaderSize, 0); // Zero out header padding.
}
}
void ConstantPoolArray::InitExtended(const NumberOfEntries& small,
const NumberOfEntries& extended) {
// Initialize small layout fields first.
Init(small);
// Set is_extended_layout field.
uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
small_layout_1 = IsExtendedField::update(small_layout_1, true);
  WRITE_UINT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
// Initialize the extended layout fields.
int extended_header_offset = get_extended_section_header_offset();
WRITE_INT_FIELD(this, extended_header_offset + kExtendedInt64CountOffset,
extended.count_of(INT64));
WRITE_INT_FIELD(this, extended_header_offset + kExtendedCodePtrCountOffset,
extended.count_of(CODE_PTR));
WRITE_INT_FIELD(this, extended_header_offset + kExtendedHeapPtrCountOffset,
extended.count_of(HEAP_PTR));
WRITE_INT_FIELD(this, extended_header_offset + kExtendedInt32CountOffset,
extended.count_of(INT32));
}
int ConstantPoolArray::size() {
NumberOfEntries small(this, SMALL_SECTION);
if (!is_extended_layout()) {
return SizeFor(small);
} else {
NumberOfEntries extended(this, EXTENDED_SECTION);
return SizeForExtended(small, extended);
}
}
int ConstantPoolArray::length() {
uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
int length = TotalCountField::decode(small_layout_2);
if (is_extended_layout()) {
length += number_of_entries(INT64, EXTENDED_SECTION) +
number_of_entries(CODE_PTR, EXTENDED_SECTION) +
number_of_entries(HEAP_PTR, EXTENDED_SECTION) +
number_of_entries(INT32, EXTENDED_SECTION);
}
return length;
}
int ConstantPoolArray::Iterator::next_index() {
ASSERT(!is_finished());
int ret = next_index_++;
update_section();
return ret;
}
bool ConstantPoolArray::Iterator::is_finished() {
return next_index_ > array_->last_index(type_, final_section_);
}
void ConstantPoolArray::Iterator::update_section() {
if (next_index_ > array_->last_index(type_, current_section_) &&
current_section_ != final_section_) {
ASSERT(final_section_ == EXTENDED_SECTION);
current_section_ = EXTENDED_SECTION;
next_index_ = array_->first_index(type_, EXTENDED_SECTION);
}
}
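// A sketch of typical iterator usage (this assumes the Iterator(array, type)
// constructor declared in objects.h):
//
//   for (ConstantPoolArray::Iterator it(array, ConstantPoolArray::HEAP_PTR);
//        !it.is_finished();) {
//     int index = it.next_index();
//     Object* entry = array->get_heap_ptr_entry(index);
//     // ... use |entry| ...
//   }
//
// next_index() walks the small section first; update_section() then jumps to
// the first entry of the same type in the extended section, if there is one.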
WriteBarrierMode HeapObject::GetWriteBarrierMode(
const DisallowHeapAllocation& promise) {
Heap* heap = GetHeap();
if (heap->incremental_marking()->IsMarking()) return UPDATE_WRITE_BARRIER;
if (heap->InNewSpace(this)) return SKIP_WRITE_BARRIER;
return UPDATE_WRITE_BARRIER;
}
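// The barrier can be skipped above because a store into a new-space object
// never creates an old-to-new pointer that the store buffer would have to
// record; while incremental marking is active the marker still needs to see
// every write, so the full barrier is kept. The intended pattern is to query
// the mode once under a DisallowHeapAllocation scope and to pass it to
// setters that accept a WriteBarrierMode, e.g. (|array|, |index| and |value|
// are illustrative):
//
//   DisallowHeapAllocation no_gc;
//   WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
//   array->set(index, value, mode);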
void FixedArray::set(int index,
Object* value,
WriteBarrierMode mode) {
ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
}
void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(array, offset, value);
Heap* heap = array->GetHeap();
if (heap->InNewSpace(value)) {
heap->RecordWrite(array->address(), offset);
}
}
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
ASSERT(!array->GetHeap()->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
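// The three FixedArray setters above trade safety for speed: set() with an
// explicit mode defers to CONDITIONAL_WRITE_BARRIER,
// NoIncrementalWriteBarrierSet() skips the incremental-marking barrier but
// still records old-to-new pointers via Heap::RecordWrite(), and
// NoWriteBarrierSet() skips both and therefore asserts that the value is not
// in new space.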
void FixedArray::set_undefined(int index) {
ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
ASSERT(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
WRITE_FIELD(this,
kHeaderSize + index * kPointerSize,
GetHeap()->undefined_value());
}
void FixedArray::set_null(int index) {
ASSERT(index >= 0 && index < this->length());
ASSERT(!GetHeap()->InNewSpace(GetHeap()->null_value()));
WRITE_FIELD(this,
kHeaderSize + index * kPointerSize,
GetHeap()->null_value());
}
void FixedArray::set_the_hole(int index) {
ASSERT(map() != GetHeap()->fixed_cow_array_map());
ASSERT(index >= 0 && index < this->length());
ASSERT(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
WRITE_FIELD(this,
kHeaderSize + index * kPointerSize,
GetHeap()->the_hole_value());
}
void FixedArray::FillWithHoles(int from, int to) {
for (int i = from; i < to; i++) {
set_the_hole(i);
}
}
Object** FixedArray::data_start() {
return HeapObject::RawField(this, kHeaderSize);
}
bool DescriptorArray::IsEmpty() {
ASSERT(length() >= kFirstIndex ||
this == GetHeap()->empty_descriptor_array());
return length() < kFirstIndex;
}
void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) {
WRITE_FIELD(
this, kDescriptorLengthOffset, Smi::FromInt(number_of_descriptors));
}
// Perform a binary search in a fixed array. Low and high are entry indices. If
// there are three entries in this array it should be called with low=0 and
// high=2.
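// The search runs in two phases: a binary search on the name hash first
// narrows [low, high] down to the first sorted entry whose hash is greater
// than or equal to the target hash, then a linear scan walks the run of
// entries with that hash and compares names for identity, since distinct
// names may share a hash. With hashes [3, 7, 7, 9] and a target hash of 7,
// for instance, the binary search stops at sorted entry 1 and the scan then
// checks entries 1 and 2. When search_mode is not ALL_ENTRIES, only entries
// whose unsorted key index (as returned by GetSortedKeyIndex()) is below
// valid_entries count as a match.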
template<SearchMode search_mode, typename T>
int BinarySearch(T* array, Name* name, int low, int high, int valid_entries) {
uint32_t hash = name->Hash();
int limit = high;
ASSERT(low <= high);
while (low != high) {
int mid = (low + high) / 2;
Name* mid_name = array->GetSortedKey(mid);
uint32_t mid_hash = mid_name->Hash();
if (mid_hash >= hash) {
high = mid;
} else {
low = mid + 1;
}
}
for (; low <= limit; ++low) {
int sort_index = array->GetSortedKeyIndex(low);
Name* entry = array->GetKey(sort_index);
if (entry->Hash() != hash) break;
if (entry->Equals(name)) {
if (search_mode == ALL_ENTRIES || sort_index < valid_entries) {