// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#include "src/accessors.h"
#include "src/api.h"
#include "src/base/once.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/store-buffer.h"
#include "src/heap-profiler.h"
#include "src/isolate-inl.h"
#include "src/natives.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
#include "src/snapshot.h"
#include "src/utils.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/arm/regexp-macro-assembler-arm.h" // NOLINT
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h" // NOLINT
#include "src/mips/regexp-macro-assembler-mips.h" // NOLINT
#endif
#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
#include "src/regexp-macro-assembler.h"
#include "src/mips64/regexp-macro-assembler-mips64.h"
#endif
namespace v8 {
namespace internal {
Heap::Heap()
: amount_of_external_allocated_memory_(0),
amount_of_external_allocated_memory_at_last_global_gc_(0),
isolate_(NULL),
code_range_size_(0),
// semispace_size_ should be a power of 2 and old_generation_size_ should
// be a multiple of Page::kPageSize.
reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
maximum_committed_(0),
survived_since_last_expansion_(0),
sweep_generation_(0),
always_allocate_scope_depth_(0),
contexts_disposed_(0),
global_ic_age_(0),
flush_monomorphic_ics_(false),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
old_data_space_(NULL),
code_space_(NULL),
map_space_(NULL),
cell_space_(NULL),
property_cell_space_(NULL),
lo_space_(NULL),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
allocations_count_(0),
raw_allocations_hash_(0),
dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
ms_count_(0),
gc_count_(0),
remembered_unmapped_pages_index_(0),
unflattened_strings_length_(0),
#ifdef DEBUG
allocation_timeout_(0),
#endif // DEBUG
old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
old_gen_exhausted_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
hidden_string_(NULL),
gc_safe_size_of_old_object_(NULL),
total_regexp_code_generated_(0),
tracer_(this),
high_survival_rate_period_length_(0),
promoted_objects_size_(0),
promotion_rate_(0),
semi_space_copied_object_size_(0),
semi_space_copied_rate_(0),
nodes_died_in_new_space_(0),
nodes_copied_in_new_space_(0),
nodes_promoted_(0),
maximum_size_scavenges_(0),
max_gc_pause_(0.0),
total_gc_time_ms_(0.0),
max_alive_after_gc_(0),
min_in_mutator_(kMaxInt),
marking_time_(0.0),
sweeping_time_(0.0),
mark_compact_collector_(this),
store_buffer_(this),
marking_(this),
incremental_marking_(this),
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
mark_sweeps_since_idle_round_started_(0),
gc_count_at_last_idle_gc_(0),
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
no_weak_object_verification_scope_depth_(0),
#endif
allocation_sites_scratchpad_length_(0),
promotion_queue_(this),
configured_(false),
external_string_table_(this),
chunks_queued_for_free_(NULL),
gc_callbacks_depth_(0) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif
// Ensure old_generation_size_ is a multiple of kPageSize.
DCHECK(MB >= Page::kPageSize);
memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
set_native_contexts_list(NULL);
set_array_buffers_list(Smi::FromInt(0));
set_allocation_sites_list(Smi::FromInt(0));
set_encountered_weak_collections(Smi::FromInt(0));
// Put a dummy entry in the remembered pages so we can find the list in the
// minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
ClearObjectStats(true);
}
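// Returns the combined capacity of all spaces except the large object
// space, or 0 if the heap has not been set up.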
intptr_t Heap::Capacity() {
if (!HasBeenSetUp()) return 0;
return new_space_.Capacity() + old_pointer_space_->Capacity() +
old_data_space_->Capacity() + code_space_->Capacity() +
map_space_->Capacity() + cell_space_->Capacity() +
property_cell_space_->Capacity();
}
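// Returns the amount of memory currently committed for the heap, including
// the large object space.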
intptr_t Heap::CommittedMemory() {
if (!HasBeenSetUp()) return 0;
return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
property_cell_space_->CommittedMemory() + lo_space_->Size();
}
size_t Heap::CommittedPhysicalMemory() {
if (!HasBeenSetUp()) return 0;
return new_space_.CommittedPhysicalMemory() +
old_pointer_space_->CommittedPhysicalMemory() +
old_data_space_->CommittedPhysicalMemory() +
code_space_->CommittedPhysicalMemory() +
map_space_->CommittedPhysicalMemory() +
cell_space_->CommittedPhysicalMemory() +
property_cell_space_->CommittedPhysicalMemory() +
lo_space_->CommittedPhysicalMemory();
}
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
return isolate()->memory_allocator()->SizeExecutable();
}
void Heap::UpdateMaximumCommitted() {
if (!HasBeenSetUp()) return;
intptr_t current_committed_memory = CommittedMemory();
if (current_committed_memory > maximum_committed_) {
maximum_committed_ = current_committed_memory;
}
}
intptr_t Heap::Available() {
if (!HasBeenSetUp()) return 0;
return new_space_.Available() + old_pointer_space_->Available() +
old_data_space_->Available() + code_space_->Available() +
map_space_->Available() + cell_space_->Available() +
property_cell_space_->Available();
}
bool Heap::HasBeenSetUp() {
return old_pointer_space_ != NULL && old_data_space_ != NULL &&
code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
property_cell_space_ != NULL && lo_space_ != NULL;
}
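// Returns the size of an old-space object even when its map word has been
// overwritten by an intrusive mark during garbage collection.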
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
if (IntrusiveMarking::IsMarked(object)) {
return IntrusiveMarking::SizeOfMarkedObject(object);
}
return object->SizeFromMap(object->map());
}
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
const char** reason) {
// Is global GC requested?
if (space != NEW_SPACE) {
isolate_->counters()->gc_compactor_caused_by_request()->Increment();
*reason = "GC in old space requested";
return MARK_COMPACTOR;
}
if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
*reason = "GC in old space forced by flags";
return MARK_COMPACTOR;
}
// Is enough data promoted to justify a global GC?
if (OldGenerationAllocationLimitReached()) {
isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
*reason = "promotion limit reached";
return MARK_COMPACTOR;
}
// Have allocation in OLD and LO failed?
if (old_gen_exhausted_) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
*reason = "old generations exhausted";
return MARK_COMPACTOR;
}
// Is there enough space left in OLD to guarantee that a scavenge can
// succeed?
//
// Note that MemoryAllocator->MaxAvailable() undercounts the memory available
// for object promotion. It counts only the bytes that the memory
// allocator has not yet allocated from the OS and assigned to any space,
// and does not count available bytes already in the old space or code
// space. Undercounting is safe---we may get an unrequested full GC when
// a scavenge would have succeeded.
if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
isolate_->counters()
->gc_compactor_caused_by_oldspace_exhaustion()
->Increment();
*reason = "scavenge might not succeed";
return MARK_COMPACTOR;
}
// Default
*reason = NULL;
return SCAVENGER;
}
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
// Heap::ReportHeapStatistics will also log NewSpace statistics when
// --log-gc is set. The following logic is used to avoid
// double logging.
#ifdef DEBUG
if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
if (FLAG_heap_stats) {
ReportHeapStatistics("Before GC");
} else if (FLAG_log_gc) {
new_space_.ReportStatistics();
}
if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
if (FLAG_log_gc) {
new_space_.CollectStatistics();
new_space_.ReportStatistics();
new_space_.ClearHistograms();
}
#endif // DEBUG
}
void Heap::PrintShortHeapStatistics() {
if (!FLAG_trace_gc_verbose) return;
PrintPID("Memory allocator, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX "d KB\n",
isolate_->memory_allocator()->Size() / KB,
isolate_->memory_allocator()->Available() / KB);
PrintPID("New space, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
"d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
new_space_.Size() / KB, new_space_.Available() / KB,
new_space_.CommittedMemory() / KB);
PrintPID("Old pointers, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
"d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
old_pointer_space_->SizeOfObjects() / KB,
old_pointer_space_->Available() / KB,
old_pointer_space_->CommittedMemory() / KB);
PrintPID("Old data space, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
"d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
old_data_space_->SizeOfObjects() / KB,
old_data_space_->Available() / KB,
old_data_space_->CommittedMemory() / KB);
PrintPID("Code space, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
"d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
code_space_->CommittedMemory() / KB);
PrintPID("Map space, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
"d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
map_space_->CommittedMemory() / KB);
PrintPID("Cell space, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
"d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
cell_space_->CommittedMemory() / KB);
PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
"d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
property_cell_space_->SizeOfObjects() / KB,
property_cell_space_->Available() / KB,
property_cell_space_->CommittedMemory() / KB);
PrintPID("Large object space, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
"d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
lo_space_->CommittedMemory() / KB);
PrintPID("All spaces, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
"d KB"
", committed: %6" V8_PTR_PREFIX "d KB\n",
this->SizeOfObjects() / KB, this->Available() / KB,
this->CommittedMemory() / KB);
PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
// As before GC, we use some complicated logic to ensure that
// NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
if (FLAG_heap_stats) {
new_space_.CollectStatistics();
ReportHeapStatistics("After GC");
} else if (FLAG_log_gc) {
new_space_.ReportStatistics();
}
#else
if (FLAG_log_gc) new_space_.ReportStatistics();
#endif // DEBUG
}
void Heap::GarbageCollectionPrologue() {
{
AllowHeapAllocation for_the_first_part_of_prologue;
ClearJSFunctionResultCaches();
gc_count_++;
unflattened_strings_length_ = 0;
if (FLAG_flush_code && FLAG_flush_code_incrementally) {
mark_compact_collector()->EnableCodeFlushing(true);
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
#endif
}
// Reset GC statistics.
promoted_objects_size_ = 0;
semi_space_copied_object_size_ = 0;
nodes_died_in_new_space_ = 0;
nodes_copied_in_new_space_ = 0;
nodes_promoted_ = 0;
UpdateMaximumCommitted();
#ifdef DEBUG
DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
if (FLAG_gc_verbose) Print();
ReportStatisticsBeforeGC();
#endif // DEBUG
store_buffer()->GCPrologue();
if (isolate()->concurrent_osr_enabled()) {
isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
}
if (new_space_.IsAtMaximumCapacity()) {
maximum_size_scavenges_++;
} else {
maximum_size_scavenges_ = 0;
}
CheckNewSpaceExpansionCriteria();
}
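// Sums the sizes of the objects held in every space of the heap.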
intptr_t Heap::SizeOfObjects() {
intptr_t total = 0;
AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->SizeOfObjects();
}
return total;
}
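// Walks the code space and clears all inline caches of the given kind in
// full-codegen and optimized code objects.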
void Heap::ClearAllICsByKind(Code::Kind kind) {
HeapObjectIterator it(code_space());
for (Object* object = it.Next(); object != NULL; object = it.Next()) {
Code* code = Code::cast(object);
Code::Kind current_kind = code->kind();
if (current_kind == Code::FUNCTION ||
current_kind == Code::OPTIMIZED_FUNCTION) {
code->ClearInlineCaches(kind);
}
}
}
void Heap::RepairFreeListsAfterBoot() {
PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next(); space != NULL;
space = spaces.next()) {
space->RepairFreeListsAfterBoot();
}
}
void Heap::ProcessPretenuringFeedback() {
if (FLAG_allocation_site_pretenuring) {
int tenure_decisions = 0;
int dont_tenure_decisions = 0;
int allocation_mementos_found = 0;
int allocation_sites = 0;
int active_allocation_sites = 0;
// If the scratchpad overflowed, we have to iterate over the allocation
// sites list.
// TODO(hpayer): We iterate over the whole list of allocation sites when
// we grew to the maximum semi-space size to deopt maybe tenured
// allocation sites. We could hold the maybe tenured allocation sites
// in a separate data structure if this is a performance problem.
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
bool use_scratchpad =
allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
!deopt_maybe_tenured;
int i = 0;
Object* list_element = allocation_sites_list();
bool trigger_deoptimization = false;
bool maximum_size_scavenge = MaximumSizeScavenge();
while (use_scratchpad ? i < allocation_sites_scratchpad_length_
: list_element->IsAllocationSite()) {
AllocationSite* site =
use_scratchpad
? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
: AllocationSite::cast(list_element);
allocation_mementos_found += site->memento_found_count();
if (site->memento_found_count() > 0) {
active_allocation_sites++;
if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
trigger_deoptimization = true;
}
if (site->GetPretenureMode() == TENURED) {
tenure_decisions++;
} else {
dont_tenure_decisions++;
}
allocation_sites++;
}
if (deopt_maybe_tenured && site->IsMaybeTenure()) {
site->set_deopt_dependent_code(true);
trigger_deoptimization = true;
}
if (use_scratchpad) {
i++;
} else {
list_element = site->weak_next();
}
}
if (trigger_deoptimization) {
isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
FlushAllocationSitesScratchpad();
if (FLAG_trace_pretenuring_statistics &&
(allocation_mementos_found > 0 || tenure_decisions > 0 ||
dont_tenure_decisions > 0)) {
PrintF(
"GC: (mode, #visited allocation sites, #active allocation sites, "
"#mementos, #tenure decisions, #donttenure decisions) "
"(%s, %d, %d, %d, %d, %d)\n",
use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
active_allocation_sites, allocation_mementos_found, tenure_decisions,
dont_tenure_decisions);
}
}
}
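// Marks for deoptimization all code that depends on allocation sites whose
// deopt_dependent_code flag is set, clears the flag, and then triggers the
// deoptimization of the marked code.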
void Heap::DeoptMarkedAllocationSites() {
// TODO(hpayer): If iterating over the allocation sites list becomes a
// performance issue, use a cache heap data structure instead (similar to the
// allocation sites scratchpad).
Object* list_element = allocation_sites_list();
while (list_element->IsAllocationSite()) {
AllocationSite* site = AllocationSite::cast(list_element);
if (site->deopt_dependent_code()) {
site->dependent_code()->MarkCodeForDeoptimization(
isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
site->set_deopt_dependent_code(false);
}
list_element = site->weak_next();
}
Deoptimizer::DeoptimizeMarkedCode(isolate_);
}
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
// In release mode, we only zap the from space under heap verification.
if (Heap::ShouldZapGarbage()) {
ZapFromSpace();
}
// Process pretenuring feedback and update allocation sites.
ProcessPretenuringFeedback();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
}
#endif
AllowHeapAllocation for_the_rest_of_the_epilogue;
#ifdef DEBUG
if (FLAG_print_global_handles) isolate_->global_handles()->Print();
if (FLAG_print_handles) PrintHandles();
if (FLAG_gc_verbose) Print();
if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif
if (FLAG_deopt_every_n_garbage_collections > 0) {
// TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
// the topmost optimized frame can be deoptimized safely, because it
// might not have a lazy bailout point right after its current PC.
if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
Deoptimizer::DeoptimizeAll(isolate());
gcs_since_last_deopt_ = 0;
}
}
UpdateMaximumCommitted();
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
isolate_->counters()->string_table_capacity()->Set(
string_table()->Capacity());
isolate_->counters()->number_of_symbols()->Set(
string_table()->NumberOfElements());
if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
(crankshaft_codegen_bytes_generated_ +
full_codegen_bytes_generated_)));
}
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
(new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
isolate_->counters()->heap_fraction_old_data_space()->AddSample(
static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
isolate_->counters()->heap_fraction_code_space()->AddSample(
static_cast<int>((code_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
(map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_fraction_cell_space()->AddSample(
static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
(lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
isolate_->counters()->heap_sample_total_committed()->AddSample(
static_cast<int>(CommittedMemory() / KB));
isolate_->counters()->heap_sample_total_used()->AddSample(
static_cast<int>(SizeOfObjects() / KB));
isolate_->counters()->heap_sample_map_space_committed()->AddSample(
static_cast<int>(map_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
static_cast<int>(cell_space()->CommittedMemory() / KB));
isolate_->counters()
->heap_sample_property_cell_space_committed()
->AddSample(
static_cast<int>(property_cell_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_code_space_committed()->AddSample(
static_cast<int>(code_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_maximum_committed()->AddSample(
static_cast<int>(MaximumCommittedMemory() / KB));
}
#define UPDATE_COUNTERS_FOR_SPACE(space) \
isolate_->counters()->space##_bytes_available()->Set( \
static_cast<int>(space()->Available())); \
isolate_->counters()->space##_bytes_committed()->Set( \
static_cast<int>(space()->CommittedMemory())); \
isolate_->counters()->space##_bytes_used()->Set( \
static_cast<int>(space()->SizeOfObjects()));
#define UPDATE_FRAGMENTATION_FOR_SPACE(space) \
if (space()->CommittedMemory() > 0) { \
isolate_->counters()->external_fragmentation_##space()->AddSample( \
static_cast<int>(100 - \
(space()->SizeOfObjects() * 100.0) / \
space()->CommittedMemory())); \
}
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
UPDATE_COUNTERS_FOR_SPACE(space) \
UPDATE_FRAGMENTATION_FOR_SPACE(space)
UPDATE_COUNTERS_FOR_SPACE(new_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
#ifdef DEBUG
ReportStatisticsAfterGC();
#endif // DEBUG
// Remember the last top pointer so that we can later find out
// whether we allocated in new space since the last GC.
new_space_top_after_last_gc_ = new_space()->top();
}
void Heap::CollectAllGarbage(int flags, const char* gc_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
mark_compact_collector_.SetFlags(flags);
CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
mark_compact_collector_.SetFlags(kNoGCFlags);
}
void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
// Since we are ignoring the return value, the exact choice of space does
// not matter, so long as we do not specify NEW_SPACE, which would not
// cause a full GC.
// Major GC would invoke weak handle callbacks on weakly reachable
// handles, but won't collect weakly reachable objects until next
// major GC. Therefore if we collect aggressively and weak handle callback
// has been invoked, we rerun major GC to release objects which become
// garbage.
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
isolate()->optimizing_compiler_thread()->Flush();
}
mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
const int kMinNumberOfAttempts = 2;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
attempt + 1 >= kMinNumberOfAttempts) {
break;
}
}
mark_compact_collector()->SetFlags(kNoGCFlags);
new_space_.Shrink();
UncommitFromSpace();
incremental_marking()->UncommitMarkingDeque();
}
void Heap::EnsureFillerObjectAtTop() {
// There may be an allocation memento behind every object in new space.
// If we evacuate a new space that is not full, or if we are on the last page
// the new space, then there may be uninitialized memory behind the top
// pointer of the new space page. We store a filler object there to
// identify the unused space.
Address from_top = new_space_.top();
Address from_limit = new_space_.limit();
if (from_top < from_limit) {
int remaining_in_page = static_cast<int>(from_limit - from_top);
CreateFillerObjectAt(from_top, remaining_in_page);
}
}
bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
const char* collector_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate_);
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
// allow at least a few allocations after a collection. The reason
// for this is that we have a lot of allocation sequences and we
// assume that a garbage collection will allow the subsequent
// allocation attempts to go through.
allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif
EnsureFillerObjectAtTop();
if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Scavenge during marking.\n");
}
}
if (collector == MARK_COMPACTOR &&
!mark_compact_collector()->abort_incremental_marking() &&
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
// Make progress in incremental marking.
const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
IncrementalMarking::NO_GC_VIA_STACK_GUARD);
if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
}
collector = SCAVENGER;
collector_reason = "incremental marking delaying mark-sweep";
}
}
bool next_gc_likely_to_collect_more = false;
{
tracer()->Start(collector, gc_reason, collector_reason);
DCHECK(AllowHeapAllocation::IsAllowed());
DisallowHeapAllocation no_allocation_during_gc;
GarbageCollectionPrologue();
{
HistogramTimerScope histogram_timer_scope(
(collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
: isolate_->counters()->gc_compactor());
next_gc_likely_to_collect_more =
PerformGarbageCollection(collector, gc_callback_flags);
}
GarbageCollectionEpilogue();
tracer()->Stop();
}
// Start incremental marking for the next cycle. The heap snapshot
// generator needs incremental marking to stay off after it aborted.
if (!mark_compact_collector()->abort_incremental_marking() &&
incremental_marking()->IsStopped() &&
incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
incremental_marking()->Start();
}
return next_gc_likely_to_collect_more;
}
int Heap::NotifyContextDisposed() {
if (isolate()->concurrent_recompilation_enabled()) {
// Flush the queued recompilation tasks.
isolate()->optimizing_compiler_thread()->Flush();
}
flush_monomorphic_ics_ = true;
AgeInlineCaches();
return ++contexts_disposed_;
}
void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
int len) {
if (len == 0) return;
DCHECK(array->map() != fixed_cow_array_map());
Object** dst_objects = array->data_start() + dst_index;
MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
if (!InNewSpace(array)) {
for (int i = 0; i < len; i++) {
// TODO(hpayer): check store buffer for entries
if (InNewSpace(dst_objects[i])) {
RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
}
}
}
incremental_marking()->RecordWrites(array);
}
#ifdef VERIFY_HEAP
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
// Check that the string is actually internalized.
CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
(*p)->IsInternalizedString());
}
}
}
};
static void VerifyStringTable(Heap* heap) {
StringTableVerifier verifier;
heap->string_table()->IterateElements(&verifier);
}
#endif // VERIFY_HEAP
static bool AbortIncrementalMarkingAndCollectGarbage(
Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
bool result = heap->CollectGarbage(space, gc_reason);
heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
return result;
}
void Heap::ReserveSpace(int* sizes, Address* locations_out) {
bool gc_performed = true;
int counter = 0;
static const int kThreshold = 20;
while (gc_performed && counter++ < kThreshold) {
gc_performed = false;
DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
if (sizes[space] != 0) {
AllocationResult allocation;
if (space == NEW_SPACE) {
allocation = new_space()->AllocateRaw(sizes[space]);
} else {
allocation = paged_space(space)->AllocateRaw(sizes[space]);
}
FreeListNode* node;
if (!allocation.To(&node)) {
if (space == NEW_SPACE) {
Heap::CollectGarbage(NEW_SPACE,
"failed to reserve space in the new space");
} else {
AbortIncrementalMarkingAndCollectGarbage(
this, static_cast<AllocationSpace>(space),
"failed to reserve space in paged space");
}
gc_performed = true;
break;
} else {
// Mark with a free list node, in case we have a GC before
// deserializing.
node->set_size(this, sizes[space]);
locations_out[space] = node->address();
}
}
}
}
if (gc_performed) {
// Failed to reserve the space after several attempts.
V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
}
}
void Heap::EnsureFromSpaceIsCommitted() {
if (new_space_.CommitFromSpaceIfNeeded()) return;
// Committing memory to from space failed.
// Memory is exhausted and we will die.
V8::FatalProcessOutOfMemory("Committing semi space failed.");
}
void Heap::ClearJSFunctionResultCaches() {
if (isolate_->bootstrapper()->IsActive()) return;
Object* context = native_contexts_list();
while (!context->IsUndefined()) {
// Get the caches for this context. GC can happen when the context
// is not fully initialized, so the caches can be undefined.
Object* caches_or_undefined =
Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
if (!caches_or_undefined->IsUndefined()) {
FixedArray* caches = FixedArray::cast(caches_or_undefined);
// Clear the caches:
int length = caches->length();
for (int i = 0; i < length; i++) {
JSFunctionResultCache::cast(caches->get(i))->Clear();
}
}
// Get the next context:
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
}
}
void Heap::ClearNormalizedMapCaches() {
if (isolate_->bootstrapper()->IsActive() &&
!incremental_marking()->IsMarking()) {
return;
}
Object* context = native_contexts_list();
while (!context->IsUndefined()) {
// GC can happen when the context is not fully initialized,
// so the cache can be undefined.
Object* cache =
Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
if (!cache->IsUndefined()) {
NormalizedMapCache::cast(cache)->Clear();
}
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
}
}
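// Recomputes the promotion and semi-space copy rates relative to the
// new-space size at the start of the GC and tracks how long the overall
// survival rate has stayed above the high-survival-rate threshold.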
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
static_cast<double>(start_new_space_size) * 100);
semi_space_copied_rate_ =
(static_cast<double>(semi_space_copied_object_size_) /
static_cast<double>(start_new_space_size) * 100);
double survival_rate = promotion_rate_ + semi_space_copied_rate_;
if (survival_rate > kYoungSurvivalRateHighThreshold) {
high_survival_rate_period_length_++;
} else {
high_survival_rate_period_length_ = 0;
}
}
bool Heap::PerformGarbageCollection(
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
int freed_global_handles = 0;
if (collector != SCAVENGER) {
PROFILE(isolate_, CodeMovingGCEvent());
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyStringTable(this);
}
#endif
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
}
}
EnsureFromSpaceIsCommitted();
int start_new_space_size = Heap::new_space()->SizeAsInt();
if (IsHighSurvivalRate()) {
// We speed up the incremental marker if it is running so that it
// does not fall behind the rate of promotion, which would cause a
// constantly growing old space.
incremental_marking()->NotifyOfHighPromotionRate();
}
if (collector == MARK_COMPACTOR) {
// Perform mark-sweep with optional compaction.
MarkCompact();
sweep_generation_++;
// Temporarily set the limit for the case when PostGarbageCollectionProcessing
// allocates and triggers GC. The real limit is set after
// PostGarbageCollectionProcessing.
old_generation_allocation_limit_ =
OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
old_gen_exhausted_ = false;
} else {
Scavenge();
}
UpdateSurvivalStatistics(start_new_space_size);
isolate_->counters()->objs_since_last_young()->Set(0);
// Callbacks that fire after this point might trigger nested GCs and
// restart incremental marking, so the assertion can't be moved down.
DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
gc_post_processing_depth_++;
{
AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
freed_global_handles =
isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
}
gc_post_processing_depth_--;
isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
// Update relocatables.
Relocatable::PostGarbageCollectionProcessing(isolate_);
if (collector == MARK_COMPACTOR) {
// Register the amount of external allocated memory.
amount_of_external_allocated_memory_at_last_global_gc_ =
amount_of_external_allocated_memory_;
old_generation_allocation_limit_ = OldGenerationAllocationLimit(
PromotedSpaceSizeOfObjects(), freed_global_handles);
}
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
AllowHeapAllocation allow_allocation;
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
}
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
VerifyStringTable(this);
}
#endif
return freed_global_handles > 0;
}
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
if (!gc_prologue_callbacks_[i].pass_isolate_) {
v8::GCPrologueCallback callback =
reinterpret_cast<v8::GCPrologueCallback>(
gc_prologue_callbacks_[i].callback);
callback(gc_type, flags);
} else {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
}
}
}
}
void Heap::CallGCEpilogueCallbacks(GCType gc_type,
GCCallbackFlags gc_callback_flags) {
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
if (!gc_epilogue_callbacks_[i].pass_isolate_) {
v8::GCPrologueCallback callback =
reinterpret_cast<v8::GCPrologueCallback>(
gc_epilogue_callbacks_[i].callback);
callback(gc_type, gc_callback_flags);
} else {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
}
}
}
}
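// Performs a full mark-sweep/mark-compact collection and, if allocation
// site pretenuring is enabled, re-evaluates old-space pretenuring decisions
// based on how much of the old generation survived the collection.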
void Heap::MarkCompact() {
gc_state_ = MARK_COMPACT;
LOG(isolate_, ResourceEvent("markcompact", "begin"));
uint64_t size_of_objects_before_gc = SizeOfObjects();
mark_compact_collector_.Prepare();
ms_count_++;
MarkCompactPrologue();
mark_compact_collector_.CollectGarbage();
LOG(isolate_, ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
isolate_->counters()->objs_since_last_full()->Set(0);
flush_monomorphic_ics_ = false;
if (FLAG_allocation_site_pretenuring) {
EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
}
}
void Heap::MarkCompactPrologue() {
// At any old GC clear the keyed lookup cache to enable collection of unused
// maps.
isolate_->keyed_lookup_cache()->Clear();
isolate_->context_slot_cache()->Clear();
isolate_->descriptor_lookup_cache()->Clear();
RegExpResultsCache::Clear(string_split_cache());
RegExpResultsCache::Clear(regexp_multiple_cache());
isolate_->compilation_cache()->MarkCompactPrologue();
CompletelyClearInstanceofCache();
FlushNumberStringCache();
if (FLAG_cleanup_code_caches_at_gc) {
polymorphic_code_cache()->set_cache(undefined_value());
}
ClearNormalizedMapCaches();
}
// Helper class for copying HeapObjects
class ScavengeVisitor : public ObjectVisitor {
public:
explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
void VisitPointer(Object** p) { ScavengePointer(p); }
void VisitPointers(Object** start, Object** end) {
// Copy all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) ScavengePointer(p);
}
private:
void ScavengePointer(Object** p) {
Object* object = *p;
if (!heap_->InNewSpace(object)) return;
Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
reinterpret_cast<HeapObject*>(object));
}
Heap* heap_;
};
#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
public:
explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
}
}
}
private:
Heap* heap_;
};
static void VerifyNonPointerSpacePointers(Heap* heap) {
// Verify that there are no pointers to new space in spaces where we
// do not expect them.
VerifyNonPointerSpacePointersVisitor v(heap);
HeapObjectIterator code_it(heap->code_space());
for (HeapObject* object = code_it.Next(); object != NULL;
object = code_it.Next())
object->Iterate(&v);
// The old data space is normally swept conservatively, so the iterator
// does not work on it and we normally skip the next bit.
if (heap->old_data_space()->swept_precisely()) {
HeapObjectIterator data_it(heap->old_data_space());
for (HeapObject* object = data_it.Next(); object != NULL;
object = data_it.Next())
object->Iterate(&v);
}
}
#endif // VERIFY_HEAP
void Heap::CheckNewSpaceExpansionCriteria() {
if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
survived_since_last_expansion_ > new_space_.Capacity()) {
// Grow the size of new space if there is room to grow, enough data
// has survived scavenge since the last expansion and we are not in
// high promotion mode.
new_space_.Grow();
survived_since_last_expansion_ = 0;
}
}
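// An object in new space is unscavenged if it has not been copied yet,
// i.e. its map word holds no forwarding address.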
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
return heap->InNewSpace(*p) &&
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
StoreBufferEvent event) {
heap->store_buffer_rebuilder_.Callback(page, event);
}
void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
if (event == kStoreBufferStartScanningPagesEvent) {
start_of_current_page_ = NULL;
current_page_ = NULL;
} else if (event == kStoreBufferScanningPageEvent) {
if (current_page_ != NULL) {
// If this page already overflowed the store buffer during this iteration.
if (current_page_->scan_on_scavenge()) {
// Then we should wipe out the entries that have been added for it.
store_buffer_->SetTop(start_of_current_page_);
} else if (store_buffer_->Top() - start_of_current_page_ >=
(store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
// Did we find too many pointers in the previous page? The heuristic is
// that no page can take more than 1/5 of the remaining slots in the store
// buffer.
current_page_->set_scan_on_scavenge(true);
store_buffer_->SetTop(start_of_current_page_);
} else {
// In this case the page we scanned took a reasonable number of slots in
// the store buffer. It has now been rehabilitated and is no longer
// marked scan_on_scavenge.
DCHECK(!current_page_->scan_on_scavenge());
}
}
start_of_current_page_ = store_buffer_->Top();
current_page_ = page;
} else if (event == kStoreBufferFullEvent) {
// The current page overflowed the store buffer again. Wipe out its entries
// in the store buffer and mark it scan-on-scavenge again. This may happen
// several times while scanning.
if (current_page_ == NULL) {
// Store Buffer overflowed while scanning promoted objects. These are not
// in any particular page, though they are likely to be clustered by the
// allocation routines.
store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
} else {
// Store Buffer overflowed while scanning a particular old space page for
// pointers to new space.
DCHECK(current_page_ == page);
DCHECK(page != NULL);
current_page_->set_scan_on_scavenge(true);
DCHECK(start_of_current_page_ != store_buffer_->Top());
store_buffer_->SetTop(start_of_current_page_);
}
} else {
UNREACHABLE();
}
}
void PromotionQueue::Initialize() {
// Assumes that a NewSpacePage exactly fits a number of promotion queue
// entries (where each is a pair of intptr_t). This allows us to simplify
// the test for when to switch pages.
DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
0);
limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
front_ = rear_ =
reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
emergency_stack_ = NULL;
guard_ = false;
}
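// Copies the promotion queue entries that share a page with the to-space
// allocation area onto an emergency stack so that scavenge allocation on
// that page cannot overwrite them.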
void PromotionQueue::RelocateQueueHead() {
DCHECK(emergency_stack_ == NULL);
Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
intptr_t* head_start = rear_;
intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
int entries_count =
static_cast<int>(head_end - head_start) / kEntrySizeInWords;
emergency_stack_ = new List<Entry>(2 * entries_count);
while (head_start != head_end) {
int size = static_cast<int>(*(head_start++));
HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
emergency_stack_->Add(Entry(obj, size));
}
rear_ = head_end;
}
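// Weak object retainer used during scavenges: objects outside from-space
// are kept, evacuated objects are replaced by their forwarding address, and
// objects left behind in from-space are dropped.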
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
public:
explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
virtual Object* RetainAs(Object* object) {
if (!heap_->InFromSpace(object)) {
return object;
}
MapWord map_word = HeapObject::cast(object)->map_word();
if (map_word.IsForwardingAddress()) {
return map_word.ToForwardingAddress();
}
return NULL;
}
private:
Heap* heap_;
};
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
gc_state_ = SCAVENGE;
// Implements Cheney's copying algorithm
LOG(isolate_, ResourceEvent("scavenge", "begin"));
// Clear descriptor cache.
isolate_->descriptor_lookup_cache()->Clear();
// Used for updating survived_since_last_expansion_ at function end.
intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
SelectScavengingVisitorsTable();
incremental_marking()->PrepareForScavenge();
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_.Flip();
new_space_.ResetAllocationInfo();
// We need to sweep newly copied objects which can be either in the
// to space or promoted to the old generation. For to-space
// objects, we treat the bottom of the to space as a queue. Newly
// copied and unswept objects lie between a 'front' mark and the
// allocation pointer.
//
// Promoted objects can go into various old-generation spaces, and
// can be allocated internally in the spaces (from the free list).
// We treat the top of the to space as a queue of addresses of
// promoted objects. The addresses of newly promoted and unswept
// objects lie between a 'front' mark and a 'rear' mark that is
// updated as a side effect of promoting an object.
//
// There is guaranteed to be enough room at the top of the to space
// for the addresses of promoted objects: every object promoted
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_.ToSpaceStart();
promotion_queue_.Initialize();
#ifdef DEBUG
store_buffer()->Clean();
#endif
ScavengeVisitor scavenge_visitor(this);
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
// Copy objects reachable from the old generation.
{
StoreBufferRebuildScope scope(this, store_buffer(),
&ScavengeStoreBufferCallback);
store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
}
// Copy objects reachable from simple cells by scavenging cell values
// directly.
HeapObjectIterator cell_iterator(cell_space_);
for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
heap_object = cell_iterator.Next()) {
if (heap_object->IsCell()) {
Cell* cell = Cell::cast(heap_object);
Address value_address = cell->ValueAddress();
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
}
}
// Copy objects reachable from global property cells by scavenging global
// property cell values directly.
HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
heap_object != NULL;
heap_object = js_global_property_cell_iterator.Next()) {
if (heap_object->IsPropertyCell()) {
PropertyCell* cell = PropertyCell::cast(heap_object);
Address value_address = cell->ValueAddress();
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
Address type_address = cell->TypeAddress();
scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
}
}
// Copy objects reachable from the encountered weak collections list.
scavenge_visitor.VisitPointer(&encountered_weak_collections_);
// Copy objects reachable from the code flushing candidates list.
MarkCompactCollector* collector = mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
}
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
while (isolate()->global_handles()->IterateObjectGroups(
&scavenge_visitor, &IsUnscavengedHeapObject)) {
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
isolate()->global_handles()->RemoveObjectGroups();
isolate()->global_handles()->RemoveImplicitRefGroups();
isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
&IsUnscavengedHeapObject);
isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
&scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
promotion_queue_.Destroy();
incremental_marking()->UpdateMarkingDequeAfterScavenge();
ScavengeWeakObjectRetainer weak_object_retainer(this);
ProcessWeakReferences(&weak_object_retainer);
DCHECK(new_space_front == new_space_.top());
// Set age mark.
new_space_.set_age_mark(new_space_.top());
new_space_.LowerInlineAllocationLimit(
new_space_.inline_allocation_limit_step());
// Update how much has survived scavenge.
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
scavenges_since_last_idle_round_++;
}
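// Returns the new location of an external string that survived the
// scavenge, or NULL after finalizing one that did not.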
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
// Unreachable external string can be finalized.
heap->FinalizeExternalString(String::cast(*p));
return NULL;
}
// String is still reachable.
return String::cast(first_word.ToForwardingAddress());
}
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
external_string_table_.Verify();
}
#endif
if (external_string_table_.new_space_strings_.is_empty()) return;
Object** start = &external_string_table_.new_space_strings_[0];
Object** end = start + external_string_table_.new_space_strings_.length();
Object** last = start;
for (Object** p = start; p < end; ++p) {
DCHECK(InFromSpace(*p));
String* target = updater_func(this, p);
if (target == NULL) continue;
DCHECK(target->IsExternalString());
if (InNewSpace(target)) {
// String is still in new space. Update the table entry.
*last = target;
++last;
} else {
// String got promoted. Move it to the old string list.
external_string_table_.AddOldString(target);
}
}
DCHECK(last <= end);
external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}
void Heap::UpdateReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func) {
// Update old space string references.
if (external_string_table_.old_space_strings_.length() > 0) {
Object** start = &external_string_table_.old_space_strings_[0];
Object** end = start + external_string_table_.old_space_strings_.length();
for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
}
UpdateNewSpaceReferencesInExternalStringTable(updater_func);
}
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
ProcessArrayBuffers(retainer);
ProcessNativeContexts(retainer);
// TODO(mvstanton): AllocationSites only need to be processed during
// MARK_COMPACT, as they live in old space. Verify and address.
ProcessAllocationSites(retainer);
}
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
// Update the head of the list of contexts.
set_native_contexts_list(head);
}
void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
Object* array_buffer_obj =
VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
set_array_buffers_list(array_buffer_obj);
}
void Heap::TearDownArrayBuffers() {
Object* undefined = undefined_value();
for (Object* o = array_buffers_list(); o != undefined;) {
JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
Runtime::FreeArrayBuffer(isolate(), buffer);
o = buffer->weak_next();
}
set_array_buffers_list(undefined);
}
void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
Object* allocation_site_obj =
VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
set_allocation_sites_list(allocation_site_obj);
}
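// Resets the pretenuring decision of every allocation site with the given
// pretenure mode and requests deoptimization of its dependent code.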
void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
DisallowHeapAllocation no_allocation_scope;
Object* cur = allocation_sites_list();
bool marked = false;
while (cur->IsAllocationSite()) {
AllocationSite* casted = AllocationSite::cast(cur);
if (casted->GetPretenureMode() == flag) {
casted->ResetPretenureDecision();
casted->set_deopt_dependent_code(true);
marked = true;
}
cur = casted->weak_next();
}
if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
}
void Heap::EvaluateOldSpaceLocalPretenuring(
uint64_t size_of_objects_before_gc) {
uint64_t size_of_objects_after_gc = SizeOfObjects();
double old_generation_survival_rate =
(static_cast<double>(size_of_objects_after_gc) * 100) /
static_cast<double>(size_of_objects_before_gc);
if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
// Too many objects died in the old generation, pretenuring of wrong
// allocation sites may be the cause for that. We have to deopt all
// dependent code registered in the allocation sites to re-evaluate
// our pretenuring decisions.
ResetAllAllocationSitesDependentCode(TENURED);
if (FLAG_trace_pretenuring) {
PrintF(
"Deopt all allocation sites dependent code due to low survival "
"rate in the old generation %f\n",
old_generation_survival_rate);
}
}
}
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
DisallowHeapAllocation no_allocation;
// All external strings are listed in the external string table.
class ExternalStringTableVisitorAdapter : public ObjectVisitor {
public:
explicit ExternalStringTableVisitorAdapter(
v8::ExternalResourceVisitor* visitor)
: visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
Utils::ToLocal(Handle<String>(String::cast(*p))));
}
}
private:
v8::ExternalResourceVisitor* visitor_;
} external_string_table_visitor(visitor);
external_string_table_.Iterate(&external_string_table_visitor);
}
class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
public:
static inline void VisitPointer(Heap* heap, Object** p) {
Object* object = *p;
if (!heap->InNewSpace(object)) return;
Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
reinterpret_cast<HeapObject*>(object));
}
};
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front) {
do {
SemiSpace::AssertValidRange(new_space_front, new_space_.top());
// The addresses new_space_front and new_space_.top() define a
// queue of unprocessed copied objects. Process them until the
// queue is empty.
while (new_space_front != new_space_.top()) {
if (!NewSpacePage::IsAtEnd(new_space_front)) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
new_space_front +=
NewSpaceScavenger::IterateBody(object->map(), object);
} else {
new_space_front =
NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
}
}
// Promote and process all the to-be-promoted objects.
{
StoreBufferRebuildScope scope(this, store_buffer(),
&ScavengeStoreBufferCallback);
while (!promotion_queue()->is_empty()) {
HeapObject* target;
int size;
promotion_queue()->remove(&target, &size);
// A promoted object might already be partially visited
// during old space pointer iteration. Thus we search specifically
// for pointers to the from semispace instead of looking for pointers
// to new space.
DCHECK(!target->IsMap());
IterateAndMarkPointersToFromSpace(
target->address(), target->address() + size, &ScavengeObject);
}
}
// Take another spin if there are now unswept objects in new space
// (there are currently no more unswept promoted objects).
} while (new_space_front != new_space_.top());
return new_space_front;
}
STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
0); // NOLINT
STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
0); // NOLINT
STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
kDoubleAlignmentMask) == 0); // NOLINT
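// Aligns a freshly allocated object (whose allocation included one extra
// pointer-sized word) on a double boundary: either a filler is placed in
// front and the object is shifted by one word, or the spare word at the end
// is turned into a filler.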
INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
int size));
static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
int size) {
if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
heap->CreateFillerObjectAt(object->address(), kPointerSize);
return HeapObject::FromAddress(object->address() + kPointerSize);
} else {
heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
kPointerSize);
return object;
}
}
enum LoggingAndProfiling {
LOGGING_AND_PROFILING_ENABLED,
LOGGING_AND_PROFILING_DISABLED
};
enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
template <MarksHandling marks_handling,
LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
table_.Register(kVisitByteArray, &EvacuateByteArray);
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
table_.Register(
kVisitNativeContext,
&ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
Context::kSize>);
table_.Register(
kVisitConsString,
&ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
ConsString::kSize>);
table_.Register(
kVisitSlicedString,
&ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
SlicedString::kSize>);
table_.Register(
kVisitSymbol,
&ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
Symbol::kSize>);
table_.Register(
kVisitSharedFunctionInfo,
&ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
SharedFunctionInfo::kSize>);
table_.Register(kVisitJSWeakCollection,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
table_.Register(kVisitJSArrayBuffer,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
table_.Register(kVisitJSTypedArray,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
table_.Register(kVisitJSDataView,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
table_.Register(kVisitJSRegExp,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
if (marks_handling == IGNORE_MARKS) {
table_.Register(
kVisitJSFunction,
&ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
JSFunction::kSize>);
} else {
table_.Register(kVisitJSFunction, &EvacuateJSFunction);
}
table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
kVisitDataObject, kVisitDataObjectGeneric>();
table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
kVisitJSObject, kVisitJSObjectGeneric>();
table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
kVisitStruct, kVisitStructGeneric>();
}
static VisitorDispatchTable<ScavengingCallback>* GetTable() {
return &table_;
}
private:
enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
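  // Records an allocation (for objects that stayed in new space) or a
  // promotion for new-space statistics when heap stats or GC logging are
  // enabled.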
static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
bool should_record = false;
#ifdef DEBUG
should_record = FLAG_heap_stats;
#endif
should_record = should_record || FLAG_log_gc;
if (should_record) {
if (heap->new_space()->Contains(obj)) {
heap->new_space()->RecordAllocation(obj);
} else {
heap->new_space()->RecordPromotion(obj);
}
}
}
// Helper function used when scavenging an object: copies the source object
// into the already-allocated target object and installs the forwarding
// pointer in the source object's map word.
INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
HeapObject* target, int size)) {
// If we migrate into to-space, then the to-space top pointer should be
// right after the target object. Incorporate double alignment
// over-allocation.
DCHECK(!heap->InToSpace(target) ||
target->address() + size == heap->new_space()->top() ||
target->address() + size + kPointerSize == heap->new_space()->top());
// Make sure that we do not overwrite the promotion queue which is at
// the end of to-space.
DCHECK(!heap->InToSpace(target) ||
heap->promotion_queue()->IsBelowPromotionQueue(
heap->new_space()->top()));
// Copy the content of source to target.
heap->CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
heap->OnMoveEvent(target, source, size);
}
if (marks_handling == TRANSFER_MARKS) {
if (Marking::TransferColor(source, target)) {
MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
}
}
}
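  // Tries to copy the object into to-space. Returns false if the to-space
  // allocation fails (e.g. due to fragmentation), in which case the caller
  // falls back to promotion.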
template <int alignment>
static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
Heap* heap = map->GetHeap();
int allocation_size = object_size;
if (alignment != kObjectAlignment) {
DCHECK(alignment == kDoubleAlignment);
allocation_size += kPointerSize;
}
DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
AllocationResult allocation =
heap->new_space()->AllocateRaw(allocation_size);
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
if (alignment != kObjectAlignment) {
target = EnsureDoubleAligned(heap, target, allocation_size);
}
// Order is important here: Set the promotion limit before migrating
// the object. Otherwise we may end up overwriting promotion queue
// entries when we migrate the object.
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
// Order is important: slot might be inside of the target if target
// was allocated over a dead object and slot comes from the store
// buffer.
*slot = target;
MigrateObject(heap, object, target, object_size);
heap->IncrementSemiSpaceCopiedObjectSize(object_size);
return true;
}
return false;
}
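  // Tries to promote the object into old space: old data space for objects
  // without pointers, old pointer space otherwise. Promoted pointer objects
  // are pushed onto the promotion queue so their slots get rescanned.
  // Returns false if the old-space allocation fails.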
template <ObjectContents object_contents, int alignment>
static inline bool PromoteObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
Heap* heap = map->GetHeap();
int allocation_size = object_size;
if (alignment != kObjectAlignment) {
DCHECK(alignment == kDoubleAlignment);
allocation_size += kPointerSize;
}
AllocationResult allocation;
if (object_contents == DATA_OBJECT) {
DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
allocation = heap->old_data_space()->AllocateRaw(allocation_size);
} else {
DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
}
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
if (alignment != kObjectAlignment) {
target = EnsureDoubleAligned(heap, target, allocation_size);
}
// Order is important: slot might be inside of the target if target
// was allocated over a dead object and slot comes from the store
// buffer.
*slot = target;
MigrateObject(heap, object, target, object_size);
if (object_contents == POINTER_OBJECT) {
if (map->instance_type() == JS_FUNCTION_TYPE) {
heap->promotion_queue()->insert(target,
JSFunction::kNonWeakFieldsEndOffset);
} else {
heap->promotion_queue()->insert(target, object_size);
}
}
heap->IncrementPromotedObjectsSize(object_size);
return true;
}
return false;
}
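  // Shared by all evacuation callbacks: objects that have not survived long
  // enough are copied within new space, everything else is promoted; if the
  // preferred strategy fails, the other one is attempted.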
template <ObjectContents object_contents, int alignment>
static inline void EvacuateObject(Map* map, HeapObject** slot,
HeapObject* object, int object_size) {
SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
SLOW_DCHECK(object->Size() == object_size);
Heap* heap = map->GetHeap();
if (!heap->ShouldBePromoted(object->address(), object_size)) {
// A semi-space copy may fail due to fragmentation. In that case, we
// try to promote the object.
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
return;
}
}
if (PromoteObject<object_contents, alignment>(map, slot, object,
object_size)) {
return;
}
// If promotion failed, we try to copy the object to the other semi-space.
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
UNREACHABLE();
}
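  // Used only when marks are transferred (see Initialize): evacuates the
  // function like an ordinary pointer object and, if it is already marked
  // black, records its code entry slot for the compactor.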
static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
HeapObject* object) {
ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
JSFunction::kSize>(map, slot, object);
HeapObject* target = *slot;
MarkBit mark_bit = Marking::MarkBitFrom(target);
if (Marking::IsBlack(mark_bit)) {
// This object is black and might not be rescanned by the marker.
// We explicitly record the code entry slot for compaction because
// promotion queue processing (IterateAndMarkPointersToFromSpace)
// would miss it, as the code entry is not HeapObject-tagged.
Address code_entry_slot =
target->address() + JSFunction::kCodeEntryOffset;
Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
code_entry_slot, code);
}
}
static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
object_size);
}
static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
HeapObject* object) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
int object_size = FixedDoubleArray::SizeFor(length);
EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
object_size);
}
static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
object_size);
}
static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
object_size);
}
static inline void EvacuateByteArray(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
object_size);
}
static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = SeqOneByteString::cast(object)
->SeqOneByteStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
object_size);
}
static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
HeapObject* object) {
int object_size = SeqTwoByteString::cast(object)
->SeqTwoByteStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
object_size);
}
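  // Cons strings whose second part is the empty string are short-circuited:
  // the slot is redirected to the first part instead of evacuating the cons
  // wrapper. Short-circuiting is disabled while marks are transferred.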
static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
HeapObject* object) {
DCHECK(IsShortcutCandidate(map->instance_type()));
Heap* heap = map->GetHeap();
if (marks_handling == IGNORE_MARKS &&
ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
HeapObject* first =
HeapObject::cast(ConsString::cast(object)->unchecked_first());
*slot = first;
if (!heap->InNewSpace(first)) {
object->set_map_word(MapWord::FromForwardingAddress(first));
return;
}
MapWord first_word = first->map_word();
if (first_word.IsForwardingAddress()) {
HeapObject* target = first_word.ToForwardingAddress();
*slot = target;
object->set_map_word(MapWord::FromForwardingAddress(target));
return;
}
heap->DoScavengeObject(first->map(), slot, first);
object->set_map_word(MapWord::FromForwardingAddress(*slot));
return;
}
int object_size = ConsString::kSize;
EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
object_size);
}
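  // Generic evacuation strategy parameterized on whether the object contains
  // pointers. VisitSpecialized bakes in a statically known instance size;
  // Visit reads the size from the map.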
template <ObjectContents object_contents>
class ObjectEvacuationStrategy {
public:
template <int object_size>
static inline void VisitSpecialized(Map* map, HeapObject** slot,
HeapObject* object) {
EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
object_size);
}
static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
int object_size = map->instance_size();
EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
object_size);
}
};
static VisitorDispatchTable<ScavengingCallback> table_;
};
template <MarksHandling marks_handling,
LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
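// Populates the dispatch tables of all four ScavengingVisitor instantiations.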
static void InitializeScavengingVisitorsTables() {
ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_DISABLED>::Initialize();
ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
ScavengingVisitor<TRANSFER_MARKS,
LOGGING_AND_PROFILING_ENABLED>::Initialize();
ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}
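// Picks the visitor table that matches the current logging/profiling state
// and the state of incremental marking.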
void Heap::SelectScavengingVisitorsTable() {
bool logging_and_profiling =
FLAG_verify_predictable || isolate()->logger()->is_logging() ||
isolate()->cpu_profiler()->is_profiling() ||
(isolate()->heap_profiler() != NULL &&
isolate()->heap_profiler()->is_tracking_object_moves());
if (!incremental_marking()->IsMarking()) {
if (!logging_and_profiling) {
scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
} else {
if (!logging_and_profiling) {
scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
} else {
scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
}
if (incremental_marking()->IsCompacting()) {
// When compacting, forbid short-circuiting of cons strings.
// The scavenging code relies on the fact that objects in new space
// cannot be evacuated into an evacuation candidate, and
// short-circuiting would violate this assumption.
scavenging_visitors_table_.Register(
StaticVisitorBase::kVisitShortcutCandidate,
scavenging_visitors_table_.GetVisitorById(
StaticVisitorBase::kVisitConsString));
}
}
}
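// Slow path of ScavengeObject: the object has no forwarding address yet, so
// dispatch to the evacuation callback registered for its map.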
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
MapWord first_word = object->map_word();
SLOW_DCHECK(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
map->GetHeap()->DoScavengeObject(map, p, object);
}
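// Allocates a map during bootstrapping, before the meta map and other root
// objects exist. Only the fields needed at this stage are initialized; the
// remaining fields are filled in later by CreateInitialMaps.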
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result;
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
reinterpret_cast<Map*>(result)->set_visitor_id(
StaticVisitorBase::GetVisitorId(instance_type, instance_size));
reinterpret_cast<Map*>(result)->set_inobject_properties(0);
reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
reinterpret_cast<Map*>(result)->set_bit_field(0);
reinterpret_cast<Map*>(result)->set_bit_field2(0);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true);
reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
return result;
}
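// Allocates a fully initialized map; assumes the meta map and the other root
// objects referenced below already exist.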
AllocationResult Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
HeapObject* result;
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(meta_map());
Map* map = Map::cast(result);
map->set_instance_type(instance_type);
map->set_visitor_id(
StaticVisitorBase::GetVisitorId(instance_type, instance_size));
map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
map->set_instance_size(instance_size);
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
map->init_back_pointer(undefined_value());
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
Map::OwnsDescriptors::encode(true);
map->set_bit_field3(bit_field3);
map->set_elements_kind(elements_kind);
return map;
}
AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
AllocationSpace space) {
HeapObject* obj;
{
AllocationResult allocation = AllocateRaw(size, space, space);
if (!allocation.To(&obj)) return allocation;
}
#ifdef DEBUG
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
DCHECK(chunk->owner()->identity() == space);
#endif
CreateFillerObjectAt(obj->address(), size);
return obj;
}
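// Tables describing the root maps, internalized strings and structs,
// generated from the corresponding macro lists and consumed when the roots
// are set up.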
const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
{ type, size, k##camel_name##MapRootIndex } \
,
STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};
const Heap::ConstantStringTable Heap::constant_string_table[] = {
#define CONSTANT_STRING_ELEMENT(name, contents) \
{ contents, k##name##RootIndex } \
,
INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
#undef CONSTANT_STRING_ELEMENT
};
const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
{ NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
,
STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};
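// Bootstraps the map hierarchy: first the meta map and a few partial maps,
// then the empty arrays they depend on, after which the partial maps are
// completed and the remaining root maps are allocated.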
bool Heap::CreateInitialMaps() {
HeapObject* obj;
{
AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
if (!allocation.To(&obj)) return false;
}
// Map::cast cannot be used due to uninitialized map field.
Map* new_meta_map = reinterpret_cast<Map*>(obj);
set_meta_map(new_meta_map);
new_meta_map->set_map(new_meta_map);
{ // Partial map allocation
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
{ \
Map* map; \
if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
set_##field_name##_map(map); \
}
ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
constant_pool_array);
#undef ALLOCATE_PARTIAL_MAP
}
// Allocate the empty array.
{
AllocationResult allocation = AllocateEmptyFixedArray();
if (!allocation.To(&obj)) return false;
}
set_empty_fixed_array(FixedArray::cast(obj));
{
AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
if (!allocation.To(&obj)) return false;
}
set_null_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kNull);
{
AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
if (!allocation.To(&obj)) return false;
}
set_undefined_value(Oddball::cast(obj));
Oddball::cast(obj)->set_kind(Oddball::kUndefined);
DCHECK(!InNewSpace(undefined_value()));
// Set preliminary exception sentinel value before actually initializing it.
set_exception(null_value());
// Allocate the empty descriptor array.
{
AllocationResult allocation = AllocateEmptyFixedArray();
if (!allocation.To(&obj)) return false;
}
set_empty_descriptor_array(DescriptorArray::cast(obj));
// Allocate the constant pool array.
{
AllocationResult allocation = AllocateEmptyConstantPoolArray();
if (!allocation.To(&obj)) return false;
}
set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
meta_map()->init_back_pointer(undefined_value());
meta_map()->set_instance_descriptors(empty_descriptor_array());
fixed_array_map()->set_code_cache(empty_fixed_array());
fixed_array_map()->set_dependent_code(
DependentCode::cast(empty_fixed_array()));
fixed_array_map()->init_back_pointer(undefined_value());
fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
undefined_map()->set_code_cache(empty_fixed_array());
undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
undefined_map()->init_back_pointer(undefined_value());
undefined_map()->set_instance_descriptors(empty_descriptor_array());
null_map()->set_code_cache(empty_fixed_array());
null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
null_map()->init_back_pointer(undefined_value());
null_map()->set_instance_descriptors(empty_descriptor_array());
constant_pool_array_map()->set_code_cache(empty_fixed_array());
constant_pool_array_map()->set_dependent_code(
DependentCode::cast(empty_fixed_array()));
constant_pool_array_map()->init_back_pointer(undefined_value());
constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
meta_map()->set_constructor(null_value());
fixed_array_map()->set_prototype(null_value());
fixed_array_map()->set_constructor(null_value());
undefined_map()->set_prototype(null_value());
undefined_map()->set_constructor(null_value());
null_map()->set_prototype(null_value());
null_map()->set_constructor(null_value());
constant_pool_array_map()->set_prototype(null_value());
constant_pool_array_map()->set_constructor(null_value());
{ // Map allocation
#define ALLOCATE_MAP(instance_type, size, field_name) \
{ \
Map* map; \
if (!AllocateMap((instance_type), size).To(&map)) return false; \
set_##field_name##_map(map); \
}
#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
DCHECK(fixed_array_map() != fixed_cow_array_map());
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
mutable_heap_number)
ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
{
AllocationResult allocation = AllocateMap(entry.type, entry.size);
if (!allocation.To(&obj)) return false;
}
// Mark cons string maps as unstable, because their objects can change
// maps during GC.
Map* map = Map::cast(obj);
if (StringShape(entry.type).IsCons()) map->mark_unstable();
roots_[entry.index] = map;
}
ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
undetectable_string_map()->set_is_undetectable();
ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
undetectable_ascii_string_map()->set_is_undetectable();
ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size) \
ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
external_##type##_array)
TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
#undef ALLOCATE_EXTERNAL_ARRAY_MAP
#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
const StructTable& entry = struct_table[i];
Map* map;
if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
roots_[entry.index] = map;
}
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
native_context_map()->set_dictionary_map(true);
native_context_map()->set_visitor_id(
StaticVisitorBase::kVisitNativeContext);
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
external_map()->set_is_extensible(false);
#undef ALLOCATE_VARSIZE_MAP
#undef ALLOCATE_MAP
}
{ // Empty arrays
{
ByteArray* byte_array;
if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
set_empty_byte_array(byte_array);
}
#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size) \
{ \
ExternalArray* obj; \
if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
return false; \
set_empty_external_##type##_array(obj); \
}
TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
{ \
FixedTypedArrayBase* obj; \
if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
return false; \
set_empty_fixed_##type##_array(obj); \
}
TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
}
DCHECK(!InNewSpace(empty_fixed_array()));
return true;
}
AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate heap numbers in paged
// spaces.
int size = HeapNumber::kSize;
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
HeapObject* result;
{
AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
if (!allocation.To(&result)) return allocation;
}
Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
HeapObject::cast(result)->set_map_no_write_barrier(map);
HeapNumber::cast(result)->set_value(value);
return result;
}
AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
HeapObject* result;
{
AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_no_write_barrier(cell_map());
Cell::cast(result)->set_value(value);
return result;
}
AllocationResult Heap::AllocatePropertyCell() {
int size = PropertyCell::kSize;
STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);