Merge "Fix memory error when dumping timings."
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 5662f36..26fac23 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -473,6 +473,7 @@
for (size_t i = 0; i < dex_filenames.size(); i++) {
const char* dex_filename = dex_filenames[i];
const char* dex_location = dex_locations[i];
+ ATRACE_BEGIN(StringPrintf("Opening dex file '%s'", dex_filenames[i]).c_str());
std::string error_msg;
if (!OS::FileExists(dex_filename)) {
LOG(WARNING) << "Skipping non-existent dex file '" << dex_filename << "'";
@@ -485,6 +486,7 @@
} else {
dex_files.push_back(dex_file);
}
+ ATRACE_END();
}
return failure_count;
}
@@ -1005,6 +1007,7 @@
dex_files = Runtime::Current()->GetClassLinker()->GetBootClassPath();
} else {
if (dex_filenames.empty()) {
+ ATRACE_BEGIN("Opening zip archive from file descriptor");
std::string error_msg;
UniquePtr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(zip_fd, zip_location.c_str(),
&error_msg));
@@ -1020,6 +1023,7 @@
return EXIT_FAILURE;
}
dex_files.push_back(dex_file);
+ ATRACE_END();
} else {
size_t failure_count = OpenDexFiles(dex_filenames, dex_locations, dex_files);
if (failure_count > 0) {
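
The ATRACE_BEGIN/ATRACE_END pairs added above have to stay balanced on every
path through the surrounding code; where a loop body or function can bail out
early, an RAII wrapper keeps them paired. A minimal sketch (a hypothetical
helper, not part of this change), assuming the usual ATRACE macros and ART's
DISALLOW_COPY_AND_ASSIGN are in scope:

    // Hypothetical helper: opens a trace section on construction and
    // guarantees the matching ATRACE_END on every exit path.
    class ScopedTrace {
     public:
      explicit ScopedTrace(const char* name) { ATRACE_BEGIN(name); }
      ~ScopedTrace() { ATRACE_END(); }
     private:
      DISALLOW_COPY_AND_ASSIGN(ScopedTrace);
    };

    // Usage inside the dex-file loop; the temporary std::string is safe
    // because ATRACE_BEGIN does not retain the pointer.
    ScopedTrace trace(StringPrintf("Opening dex file '%s'", dex_filename).c_str());
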
diff --git a/runtime/Android.mk b/runtime/Android.mk
index a3a8bec..353f160 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -55,6 +55,7 @@
gc/collector/partial_mark_sweep.cc \
gc/collector/semi_space.cc \
gc/collector/sticky_mark_sweep.cc \
+ gc/gc_cause.cc \
gc/heap.cc \
gc/reference_queue.cc \
gc/space/bump_pointer_space.cc \
@@ -229,7 +230,11 @@
arch/mips/quick_entrypoints_mips.S \
arch/mips/thread_mips.cc
else # TARGET_ARCH != mips
+ifeq ($(TARGET_ARCH),aarch64)
+$(info TODOAArch64: $(LOCAL_PATH)/Android.mk Add AArch64 specific runtime files)
+else
$(error unsupported TARGET_ARCH=$(TARGET_ARCH))
+endif # TARGET_ARCH != aarch64
endif # TARGET_ARCH != mips
endif # TARGET_ARCH != x86
endif # TARGET_ARCH != x86_64
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 4ea1366..2141997 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -847,14 +847,18 @@
JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
- // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However
- // the RI seems to ignore this and does not return any error in this case. Let's comply with
- // JDWP specs here.
- if (o == NULL || o == ObjectRegistry::kInvalidObject) {
+ if (object_id == 0) {
+ // Null object id is invalid.
return JDWP::ERR_INVALID_OBJECT;
}
- is_collected = gRegistry->IsCollected(object_id);
+ // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However
+ // the RI seems to ignore this and assume the object has been collected.
+ mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
+ if (o == NULL || o == ObjectRegistry::kInvalidObject) {
+ is_collected = true;
+ } else {
+ is_collected = gRegistry->IsCollected(object_id);
+ }
return JDWP::ERR_NONE;
}
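
The net effect for a JDWP client, illustratively (this is not test code from
the patch, and stale_id/live_id are placeholder ids):

    bool is_collected;
    Dbg::IsCollected(0, is_collected);         // ERR_INVALID_OBJECT: null id.
    Dbg::IsCollected(stale_id, is_collected);  // ERR_NONE, is_collected == true.
    Dbg::IsCollected(live_id, is_collected);   // ERR_NONE, asks the registry.
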
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 28428cc..25e8966 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -37,6 +37,7 @@
GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
: heap_(heap),
name_(name),
+ gc_cause_(kGcCauseForAlloc),
clear_soft_references_(false),
verbose_(VLOG_IS_ON(heap)),
duration_ns_(0),
@@ -63,13 +64,14 @@
total_freed_bytes_ = 0;
}
-void GarbageCollector::Run(bool clear_soft_references) {
+void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
uint64_t start_time = NanoTime();
pause_times_.clear();
duration_ns_ = 0;
clear_soft_references_ = clear_soft_references;
+ gc_cause_ = gc_cause;
// Reset stats.
freed_bytes_ = 0;
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 1779339..088f1d4 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -19,6 +19,7 @@
#include "base/histogram.h"
#include "base/timing_logger.h"
+#include "gc/gc_cause.h"
#include "gc_type.h"
#include "locks.h"
#include <stdint.h>
@@ -46,7 +47,7 @@
virtual GcType GetGcType() const = 0;
// Run the garbage collector.
- void Run(bool clear_soft_references);
+ void Run(GcCause gc_cause, bool clear_soft_references);
Heap* GetHeap() const {
return heap_;
@@ -133,6 +134,7 @@
std::string name_;
+ GcCause gc_cause_;
bool clear_soft_references_;
const bool verbose_;
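
Call sites now thread the cause through Run(); both forms appear in the
heap.cc hunks below:

    // Collector transitions and zygote compaction name their cause directly:
    semi_space_collector_->Run(kGcCauseCollectorTransition, false);
    // The regular collection path forwards whatever triggered the GC:
    collector->Run(gc_cause, clear_soft_references);
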
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 113139b..99c726d 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -62,12 +62,6 @@
static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;
-// TODO: move these to a new file as a new garbage collector?
-// If true, 'promote' some objects from the bump pointer spaces to the non-moving space.
-static constexpr bool kEnableSimplePromo = false;
-// If true, collect the bump pointer spaces only, as opposed to the
-// whole heap in some collections.
-static constexpr bool kEnableBumpPointerSpacesOnlyCollection = false;
// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
@@ -93,10 +87,10 @@
// being sorted by Heap::AddContinuousSpace.
if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
- // Use Limit() instead of End() because otherwise if
- // kEnableBumpPointerSpacesOnlyCollection is true, the alloc
- // space might expand due to promotion and the sense of immunity
- // may change in the middle of a GC.
+ // Use Limit() instead of End() because otherwise if the
+ // generational mode is enabled, the alloc space might expand
+ // due to promotion and the sense of immunity may change in the
+ // middle of a GC.
immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
}
}
@@ -115,14 +109,14 @@
// Add the main free list space and the non-moving
// space to the immune space if a bump pointer space
// only collection.
- || (kEnableBumpPointerSpacesOnlyCollection &&
- !whole_heap_collection_ && (space == GetHeap()->GetNonMovingSpace() ||
- space == GetHeap()->GetPrimaryFreeListSpace()))) {
+ || (generational_ && !whole_heap_collection_ &&
+ (space == GetHeap()->GetNonMovingSpace() ||
+ space == GetHeap()->GetPrimaryFreeListSpace()))) {
ImmuneSpace(space);
}
}
}
- if (kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
+ if (generational_ && !whole_heap_collection_) {
// We won't collect the large object space if a bump pointer space only collection.
is_large_object_space_immune_ = true;
GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();
@@ -130,7 +124,7 @@
timings_.EndSplit();
}
-SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
+SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
mark_stack_(nullptr),
@@ -140,6 +134,7 @@
to_space_(nullptr),
from_space_(nullptr),
self_(nullptr),
+ generational_(generational),
last_gc_to_space_end_(nullptr),
bytes_promoted_(0),
whole_heap_collection_(true),
@@ -170,10 +165,12 @@
}
void SemiSpace::MarkingPhase() {
- if (kEnableBumpPointerSpacesOnlyCollection) {
- if (clear_soft_references_) {
- // If we want to collect as much as possible, collect the whole
- // heap (and reset the interval counter to be consistent.)
+ if (generational_) {
+ if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
+ clear_soft_references_) {
+ // If this is an explicit, native allocation-triggered, or last-attempt
+ // collection, collect the whole heap (and reset the interval counter to
+ // be consistent.)
whole_heap_collection_ = true;
whole_heap_collection_interval_counter_ = 0;
}
@@ -189,7 +186,7 @@
// Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
// wrong space.
heap_->SwapSemiSpaces();
- if (kEnableSimplePromo) {
+ if (generational_) {
// If last_gc_to_space_end_ is out of the bounds of the from-space
// (the to-space from last GC), then point it to the beginning of
// the from-space. For example, the very first GC or the
@@ -243,7 +240,7 @@
// space is added to the immune space. But the non-moving
// space doesn't have a mod union table. Instead, its live
// bitmap will be scanned later in MarkReachableObjects().
- DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
+ DCHECK(generational_ && !whole_heap_collection_ &&
(space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
}
}
@@ -278,7 +275,7 @@
// (including the objects on the live stack which have just been marked
// in the live bitmap above in MarkAllocStackAsLive().)
if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
- DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
+ DCHECK(generational_ && !whole_heap_collection_ &&
(space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
SemiSpaceScanObjectVisitor visitor(this);
@@ -289,7 +286,7 @@
}
if (is_large_object_space_immune_) {
- DCHECK(kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_);
+ DCHECK(generational_ && !whole_heap_collection_);
// When the large object space is immune, we need to scan the
// large object space as roots as they contain references to their
// classes (primitive array classes) that could move though they
@@ -359,7 +356,7 @@
mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
}
- if (kEnableSimplePromo) {
+ if (generational_) {
// Record the end (top) of the to space so we can distinguish
// between objects that were allocated since the last GC and the
// older objects.
@@ -401,7 +398,7 @@
size_t object_size = obj->SizeOf();
size_t bytes_allocated;
mirror::Object* forward_address = nullptr;
- if (kEnableSimplePromo && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
+ if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
@@ -420,27 +417,25 @@
accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
DCHECK(mark_bitmap != nullptr);
DCHECK(!live_bitmap->Test(forward_address));
- if (kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
+ if (!whole_heap_collection_) {
// If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
DCHECK_EQ(live_bitmap, mark_bitmap);
- // If a bump pointer space only collection (and the
- // promotion is enabled,) delay the live bitmap marking
- // of the promoted object until it's popped off the mark
- // stack (ProcessMarkStack()). The rationale: we may be
- // in the middle of scanning the objects in the
- // promo destination space for
+ // If a bump pointer space only collection, delay the live
+ // bitmap marking of the promoted object until it's popped off
+ // the mark stack (ProcessMarkStack()). The rationale: we may
+ // be in the middle of scanning the objects in the promo
+ // destination space for
// non-moving-space-to-bump-pointer-space references by
// iterating over the marked bits of the live bitmap
- // (MarkReachableObjects()). If we don't delay it (and
- // instead mark the promoted object here), the above
- // promo destination space scan could encounter the
- // just-promoted object and forward the references in
- // the promoted object's fields even through it is
- // pushed onto the mark stack. If this happens, the
- // promoted object would be in an inconsistent state,
- // that is, it's on the mark stack (gray) but its fields
- // are already forwarded (black), which would cause a
+ // (MarkReachableObjects()). If we don't delay it (and instead
+ // mark the promoted object here), the above promo destination
+ // space scan could encounter the just-promoted object and
+ // forward the references in the promoted object's fields even
+ // though it is pushed onto the mark stack. If this happens,
+ // the promoted object would be in an inconsistent state, that
+ // is, it's on the mark stack (gray) but its fields are
+ // already forwarded (black), which would cause a
// DCHECK(!to_space_->HasAddress(obj)) failure below.
} else {
// Mark forward_address on the live bit map.
@@ -462,7 +457,7 @@
to_space_live_bitmap_->Set(forward_address);
}
DCHECK(to_space_->HasAddress(forward_address) ||
- (kEnableSimplePromo && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
+ (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
return forward_address;
}
@@ -489,7 +484,7 @@
} else {
accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
if (LIKELY(object_bitmap != nullptr)) {
- if (kEnableBumpPointerSpacesOnlyCollection) {
+ if (generational_) {
// If a bump pointer space only collection, we should not
// reach here as we don't/won't mark the objects in the
// non-moving space (except for the promoted objects.) Note
@@ -623,7 +618,7 @@
void SemiSpace::ProcessMarkStack(bool paused) {
space::MallocSpace* promo_dest_space = NULL;
accounting::SpaceBitmap* live_bitmap = NULL;
- if (kEnableSimplePromo && kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_) {
+ if (generational_ && !whole_heap_collection_) {
// If a bump pointer space only collection (and the promotion is
// enabled,) we delay the live-bitmap marking of promoted objects
// from MarkObject() until this function.
@@ -637,8 +632,7 @@
timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
while (!mark_stack_->IsEmpty()) {
Object* obj = mark_stack_->PopBack();
- if (kEnableSimplePromo && kEnableBumpPointerSpacesOnlyCollection && !whole_heap_collection_ &&
- promo_dest_space->HasAddress(obj)) {
+ if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
// obj has just been promoted. Mark the live bitmap for it,
// which is delayed from MarkObject().
DCHECK(!live_bitmap->Test(obj));
@@ -728,7 +722,7 @@
space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
large_objects->GetMarkObjects()->Clear();
- if (kEnableBumpPointerSpacesOnlyCollection) {
+ if (generational_) {
// Decide whether to do a whole heap collection or a bump pointer
// only space collection at the next collection by updating
// whole_heap_collection. Enable whole_heap_collection once every
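
The hunk above is cut off by the diff context, but together with
kDefaultWholeHeapCollectionInterval (declared in semi_space.h below) the
intent is a periodic whole-heap collection. A paraphrased sketch of such an
interval policy; this is an assumption about the elided body, not the literal
patch code:

    if (generational_) {
      if (!whole_heap_collection_) {
        // A bump-pointer-space-only GC just ran; promote the next GC to a
        // whole-heap collection once every
        // kDefaultWholeHeapCollectionInterval (5) collections.
        ++whole_heap_collection_interval_counter_;
        if (whole_heap_collection_interval_counter_ >=
            kDefaultWholeHeapCollectionInterval) {
          whole_heap_collection_ = true;
        }
      } else {
        // A whole-heap collection just ran; reset the counter and return to
        // bump-pointer-space-only collections.
        whole_heap_collection_interval_counter_ = 0;
        whole_heap_collection_ = false;
      }
    }
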
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index ba9f0f6..bf129a3 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -63,7 +63,8 @@
class SemiSpace : public GarbageCollector {
public:
- explicit SemiSpace(Heap* heap, const std::string& name_prefix = "");
+ explicit SemiSpace(Heap* heap, bool generational = false,
+ const std::string& name_prefix = "");
~SemiSpace() {}
@@ -274,25 +275,31 @@
Thread* self_;
- // Used for kEnableSimplePromo. The end/top of the bump pointer
- // space at the end of the last collection.
+ // When true, the generational mode (promotion and the bump pointer
+ // space only collection) is enabled. TODO: move these to a new file
+ // as a new garbage collector?
+ bool generational_;
+
+ // Used for the generational mode. The end/top of the bump
+ // pointer space at the end of the last collection.
byte* last_gc_to_space_end_;
- // Used for kEnableSimplePromo. During a collection, keeps track of
- // how many bytes of objects have been copied so far from the bump
- // pointer space to the non-moving space.
+ // Used for the generational mode. During a collection, keeps track
+ // of how many bytes of objects have been copied so far from the
+ // bump pointer space to the non-moving space.
uint64_t bytes_promoted_;
- // When true, collect the whole heap. When false, collect only the
- // bump pointer spaces.
+ // Used for the generational mode. When true, collect the whole
+ // heap. When false, collect only the bump pointer spaces.
bool whole_heap_collection_;
- // A counter used to enable whole_heap_collection_ once per
- // interval.
+ // Used for the generational mode. A counter used to enable
+ // whole_heap_collection_ once per interval.
int whole_heap_collection_interval_counter_;
- // The default interval of the whole heap collection. If N, the
- // whole heap collection occurs every N collections.
+ // Used for the generational mode. The default interval of the whole
+ // heap collection. If N, the whole heap collection occurs every N
+ // collections.
static constexpr int kDefaultWholeHeapCollectionInterval = 5;
private:
diff --git a/runtime/gc/collector_type.h b/runtime/gc/collector_type.h
index 06395cf..4bc9ad2 100644
--- a/runtime/gc/collector_type.h
+++ b/runtime/gc/collector_type.h
@@ -32,6 +32,8 @@
kCollectorTypeCMS,
// Semi-space / mark-sweep hybrid, enables compaction.
kCollectorTypeSS,
+ // A generational variant of kCollectorTypeSS.
+ kCollectorTypeGSS,
};
std::ostream& operator<<(std::ostream& os, const CollectorType& collector_type);
diff --git a/runtime/gc/gc_cause.cc b/runtime/gc/gc_cause.cc
new file mode 100644
index 0000000..b25f7ff
--- /dev/null
+++ b/runtime/gc/gc_cause.cc
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc_cause.h"
+#include "globals.h"
+#include "base/logging.h"
+
+#include <ostream>
+
+namespace art {
+namespace gc {
+
+const char* PrettyCause(GcCause cause) {
+ switch (cause) {
+ case kGcCauseForAlloc: return "Alloc";
+ case kGcCauseBackground: return "Background";
+ case kGcCauseExplicit: return "Explicit";
+ case kGcCauseForNativeAlloc: return "NativeAlloc";
+ case kGcCauseCollectorTransition: return" CollectorTransition";
+ default:
+ LOG(FATAL) << "Unreachable";
+ }
+ return "";
+}
+
+std::ostream& operator<<(std::ostream& os, const GcCause& gc_cause) {
+ os << PrettyCause(gc_cause);
+ return os;
+}
+
+} // namespace gc
+} // namespace art
diff --git a/runtime/gc/gc_cause.h b/runtime/gc/gc_cause.h
new file mode 100644
index 0000000..7499b9e
--- /dev/null
+++ b/runtime/gc/gc_cause.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_GC_CAUSE_H_
+#define ART_RUNTIME_GC_GC_CAUSE_H_
+
+#include <ostream>
+
+namespace art {
+namespace gc {
+
+// What caused the GC?
+enum GcCause {
+ // GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
+ // retrying allocation.
+ kGcCauseForAlloc,
+ // A background GC trying to ensure there is free memory ahead of allocations.
+ kGcCauseBackground,
+ // An explicit System.gc() call.
+ kGcCauseExplicit,
+ // GC triggered for a native allocation.
+ kGcCauseForNativeAlloc,
+ // GC triggered for a collector transition.
+ kGcCauseCollectorTransition,
+};
+
+const char* PrettyCause(GcCause cause);
+std::ostream& operator<<(std::ostream& os, const GcCause& gc_cause);
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_GC_CAUSE_H_
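
With these declarations, a cause is printable wherever a stream or a C string
is wanted; a minimal usage sketch (the ATRACE line is the existing call in
heap.cc):

    LOG(INFO) << "GC cause: " << gc::kGcCauseExplicit;  // prints "Explicit"
    ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause),
                              collector->GetName()).c_str());
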
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 56e3e00..6d30e1c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -273,7 +273,8 @@
}
if (kMovingCollector) {
// TODO: Clean this up.
- semi_space_collector_ = new collector::SemiSpace(this);
+ bool generational = post_zygote_collector_type_ == kCollectorTypeGSS;
+ semi_space_collector_ = new collector::SemiSpace(this, generational);
garbage_collectors_.push_back(semi_space_collector_);
}
@@ -1165,7 +1166,8 @@
}
tl->SuspendAll();
switch (collector_type) {
- case kCollectorTypeSS: {
+ case kCollectorTypeSS:
+ case kCollectorTypeGSS: {
mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE);
CHECK(main_space_ != nullptr);
Compact(temp_space_, main_space_);
@@ -1179,7 +1181,7 @@
case kCollectorTypeMS:
// Fall through.
case kCollectorTypeCMS: {
- if (collector_type_ == kCollectorTypeSS) {
+ if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) {
// TODO: Use mem-map from temp space?
MemMap* mem_map = allocator_mem_map_.release();
CHECK(mem_map != nullptr);
@@ -1233,7 +1235,8 @@
collector_type_ = collector_type;
gc_plan_.clear();
switch (collector_type_) {
- case kCollectorTypeSS: {
+ case kCollectorTypeSS:
+ case kCollectorTypeGSS: {
concurrent_gc_ = false;
gc_plan_.push_back(collector::kGcTypeFull);
if (use_tlab_) {
@@ -1388,7 +1391,7 @@
temp_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
zygote_collector.SetFromSpace(bump_pointer_space_);
zygote_collector.SetToSpace(&target_space);
- zygote_collector.Run(false);
+ zygote_collector.Run(kGcCauseCollectorTransition, false);
CHECK(temp_space_->IsEmpty());
total_objects_freed_ever_ += semi_space_collector_->GetFreedObjects();
total_bytes_freed_ever_ += semi_space_collector_->GetFreedBytes();
@@ -1469,17 +1472,6 @@
}
}
-const char* PrettyCause(GcCause cause) {
- switch (cause) {
- case kGcCauseForAlloc: return "Alloc";
- case kGcCauseBackground: return "Background";
- case kGcCauseExplicit: return "Explicit";
- default:
- LOG(FATAL) << "Unreachable";
- }
- return "";
-}
-
void Heap::SwapSemiSpaces() {
// Swap the spaces so we allocate into the space which we just evacuated.
std::swap(bump_pointer_space_, temp_space_);
@@ -1492,7 +1484,7 @@
if (target_space != source_space) {
semi_space_collector_->SetFromSpace(source_space);
semi_space_collector_->SetToSpace(target_space);
- semi_space_collector_->Run(false);
+ semi_space_collector_->Run(kGcCauseCollectorTransition, false);
}
}
@@ -1541,7 +1533,7 @@
collector::GarbageCollector* collector = nullptr;
// TODO: Clean this up.
- if (collector_type_ == kCollectorTypeSS) {
+ if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) {
DCHECK(current_allocator_ == kAllocatorTypeBumpPointer ||
current_allocator_ == kAllocatorTypeTLAB);
gc_type = semi_space_collector_->GetGcType();
@@ -1569,7 +1561,7 @@
ATRACE_BEGIN(StringPrintf("%s %s GC", PrettyCause(gc_cause), collector->GetName()).c_str());
- collector->Run(clear_soft_references);
+ collector->Run(gc_cause, clear_soft_references);
total_objects_freed_ever_ += collector->GetFreedObjects();
total_bytes_freed_ever_ += collector->GetFreedBytes();
@@ -2383,7 +2375,7 @@
}
// If we still are over the watermark, attempt a GC for alloc and run finalizers.
if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
- CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
+ CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
RunFinalization(env);
native_need_to_run_finalization_ = false;
CHECK(!env->ExceptionCheck());
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 465ee4c..0c3db86 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -25,6 +25,7 @@
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
+#include "gc/gc_cause.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "globals.h"
@@ -98,18 +99,6 @@
kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
};
-// What caused the GC?
-enum GcCause {
- // GC triggered by a failed allocation. Thread doing allocation is blocked waiting for GC before
- // retrying allocation.
- kGcCauseForAlloc,
- // A background GC trying to ensure there is free memory ahead of allocations.
- kGcCauseBackground,
- // An explicit System.gc() call.
- kGcCauseExplicit,
-};
-std::ostream& operator<<(std::ostream& os, const GcCause& policy);
-
// How we want to sanity check the heap's correctness.
enum HeapVerificationMode {
kHeapVerificationNotPermitted, // Too early in runtime start-up for heap to be verified.
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 6a04c3a..4a84cfe 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -66,7 +66,8 @@
}
static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename,
- jobject javaFd, jint bufferSize, jint flags) {
+ jobject javaFd, jint bufferSize, jint flags,
+ jboolean samplingEnabled, jint intervalUs) {
int originalFd = jniGetFDFromFileDescriptor(env, javaFd);
if (originalFd < 0) {
return;
@@ -85,16 +86,17 @@
if (traceFilename.c_str() == NULL) {
return;
}
- Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, false, false, 0);
+ Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, false, samplingEnabled, intervalUs);
}
static void VMDebug_startMethodTracingFilename(JNIEnv* env, jclass, jstring javaTraceFilename,
- jint bufferSize, jint flags) {
+ jint bufferSize, jint flags,
+ jboolean samplingEnabled, jint intervalUs) {
ScopedUtfChars traceFilename(env, javaTraceFilename);
if (traceFilename.c_str() == NULL) {
return;
}
- Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, false, false, 0);
+ Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, false, samplingEnabled, intervalUs);
}
static jint VMDebug_getMethodTracingMode(JNIEnv*, jclass) {
@@ -325,8 +327,8 @@
NATIVE_METHOD(VMDebug, startEmulatorTracing, "()V"),
NATIVE_METHOD(VMDebug, startInstructionCounting, "()V"),
NATIVE_METHOD(VMDebug, startMethodTracingDdmsImpl, "(IIZI)V"),
- NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;Ljava/io/FileDescriptor;II)V"),
- NATIVE_METHOD(VMDebug, startMethodTracingFilename, "(Ljava/lang/String;II)V"),
+ NATIVE_METHOD(VMDebug, startMethodTracingFd, "(Ljava/lang/String;Ljava/io/FileDescriptor;IIZI)V"),
+ NATIVE_METHOD(VMDebug, startMethodTracingFilename, "(Ljava/lang/String;IIZI)V"),
NATIVE_METHOD(VMDebug, stopAllocCounting, "()V"),
NATIVE_METHOD(VMDebug, stopEmulatorTracing, "()V"),
NATIVE_METHOD(VMDebug, stopInstructionCounting, "()V"),
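
For reference, the updated JNI descriptor decodes left to right as follows;
the trailing Z/I pair is what this change adds:

    // "(Ljava/lang/String;Ljava/io/FileDescriptor;IIZI)V"
    //   Ljava/lang/String;        traceFilename
    //   Ljava/io/FileDescriptor;  javaFd
    //   I                         bufferSize
    //   I                         flags
    //   Z                         samplingEnabled   (new)
    //   I                         intervalUs        (new)
    //   V                         returns void
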
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index 084e1e2..407aa65 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -417,12 +417,12 @@
return GetDexFile().GetProtoParameters(proto);
}
- mirror::Class* GetReturnType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile& dex_file = GetDexFile();
const DexFile::MethodId& method_id = dex_file.GetMethodId(method_->GetDexMethodIndex());
const DexFile::ProtoId& proto_id = dex_file.GetMethodPrototype(method_id);
uint16_t return_type_idx = proto_id.return_type_idx_;
- return GetClassFromTypeIdx(return_type_idx);
+ return GetClassFromTypeIdx(return_type_idx, resolve);
}
const char* GetReturnTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -532,10 +532,10 @@
return method_->GetDexCacheResolvedTypes()->Get(type_idx) != NULL;
}
- mirror::Class* GetClassFromTypeIdx(uint16_t type_idx)
+ mirror::Class* GetClassFromTypeIdx(uint16_t type_idx, bool resolve = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Class* type = method_->GetDexCacheResolvedTypes()->Get(type_idx);
- if (type == NULL) {
+ if (type == NULL && resolve) {
type = GetClassLinker()->ResolveType(type_idx, method_);
CHECK(type != NULL || Thread::Current()->IsExceptionPending());
}
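
The new resolve flag lets callers that must not trigger class loading (the
verifier with can_load_classes_ == false, in the hunks below) probe the dex
cache only. A sketch of the intended pattern, paraphrasing the verifier call
sites rather than quoting them:

    MethodHelper mh(called_method);
    mirror::Class* return_type_class = mh.GetReturnType(false);  // cache lookup only.
    if (return_type_class == nullptr) {
      // Unresolved and we may not load it: fall back to a descriptor-based
      // RegType without touching the class loader.
    }
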
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 91d9b94..2af569a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -367,6 +367,8 @@
collector_type = gc::kCollectorTypeCMS;
} else if (gc_options[i] == "SS") {
collector_type = gc::kCollectorTypeSS;
+ } else if (gc_options[i] == "GSS") {
+ collector_type = gc::kCollectorTypeGSS;
} else {
LOG(WARNING) << "Ignoring unknown -Xgc option: " << gc_options[i];
return gc::kCollectorTypeNone;
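
With this branch in place the generational collector can be requested at VM
startup; illustratively (the -Xgc:GSS flag is what the branch above parses,
the surrounding command line is an assumption):

    // dalvikvm -Xgc:GSS ...  =>  gc::kCollectorTypeGSS, which the Heap
    // constructor (see heap.cc above) turns into
    // new collector::SemiSpace(this, /*generational=*/true).
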
diff --git a/runtime/signal_catcher.cc b/runtime/signal_catcher.cc
index fe62e25..611c0a8 100644
--- a/runtime/signal_catcher.cc
+++ b/runtime/signal_catcher.cc
@@ -145,12 +145,13 @@
}
os << "----- end " << getpid() << " -----\n";
CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
+ self->EndAssertNoThreadSuspension(old_cause);
+ thread_list->ResumeAll();
+ // Run the checkpoints after resuming the threads to prevent deadlocks if the checkpoint function
+ // acquires the mutator lock.
if (self->ReadFlag(kCheckpointRequest)) {
self->RunCheckpointFunction();
}
- self->EndAssertNoThreadSuspension(old_cause);
- thread_list->ResumeAll();
-
Output(os.str());
}
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 5d053b6..b0f6e37 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -299,6 +299,7 @@
void* Trace::RunSamplingThread(void* arg) {
Runtime* runtime = Runtime::Current();
int interval_us = reinterpret_cast<int>(arg);
+ CHECK_GE(interval_us, 0);
CHECK(runtime->AttachCurrentThread("Sampling Profiler", true, runtime->GetSystemThreadGroup(),
!runtime->IsCompiler()));
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 9a2de47..d2681df 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -2129,13 +2129,13 @@
const RegType* return_type = nullptr;
if (called_method != nullptr) {
MethodHelper mh(called_method);
- mirror::Class* return_type_class = mh.GetReturnType();
+ mirror::Class* return_type_class = mh.GetReturnType(can_load_classes_);
if (return_type_class != nullptr) {
return_type = &reg_types_.FromClass(mh.GetReturnTypeDescriptor(), return_type_class,
return_type_class->CannotBeAssignedFromOtherTypes());
} else {
Thread* self = Thread::Current();
- DCHECK(self->IsExceptionPending());
+ DCHECK(!can_load_classes_ || self->IsExceptionPending());
self->ClearException();
}
}
@@ -3518,10 +3518,14 @@
const RegType* field_type = nullptr;
if (field != NULL) {
FieldHelper fh(field);
- mirror::Class* field_type_class = fh.GetType(false);
+ mirror::Class* field_type_class = fh.GetType(can_load_classes_);
if (field_type_class != nullptr) {
field_type = &reg_types_.FromClass(fh.GetTypeDescriptor(), field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
+ } else {
+ Thread* self = Thread::Current();
+ DCHECK(!can_load_classes_ || self->IsExceptionPending());
+ self->ClearException();
}
}
if (field_type == nullptr) {
@@ -3580,10 +3584,14 @@
return;
}
FieldHelper fh(field);
- mirror::Class* field_type_class = fh.GetType(false);
+ mirror::Class* field_type_class = fh.GetType(can_load_classes_);
if (field_type_class != nullptr) {
field_type = &reg_types_.FromClass(fh.GetTypeDescriptor(), field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
+ } else {
+ Thread* self = Thread::Current();
+ DCHECK(!can_load_classes_ || self->IsExceptionPending());
+ self->ClearException();
}
}
if (field_type == nullptr) {
@@ -3673,12 +3681,15 @@
return;
}
FieldHelper fh(field);
- mirror::Class* field_type_class = fh.GetType(false);
+ mirror::Class* field_type_class = fh.GetType(can_load_classes_);
const RegType* field_type;
if (field_type_class != nullptr) {
field_type = &reg_types_.FromClass(fh.GetTypeDescriptor(), field_type_class,
field_type_class->CannotBeAssignedFromOtherTypes());
} else {
+ Thread* self = Thread::Current();
+ DCHECK(!can_load_classes_ || self->IsExceptionPending());
+ self->ClearException();
field_type = &reg_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(),
fh.GetTypeDescriptor(), false);
}
@@ -3856,13 +3867,13 @@
if (return_type_ == nullptr) {
if (mirror_method_ != NULL) {
MethodHelper mh(mirror_method_);
- mirror::Class* return_type_class = mh.GetReturnType();
+ mirror::Class* return_type_class = mh.GetReturnType(can_load_classes_);
if (return_type_class != nullptr) {
return_type_ = &reg_types_.FromClass(mh.GetReturnTypeDescriptor(), return_type_class,
return_type_class->CannotBeAssignedFromOtherTypes());
} else {
Thread* self = Thread::Current();
- DCHECK(self->IsExceptionPending());
+ DCHECK(!can_load_classes_ || self->IsExceptionPending());
self->ClearException();
}
}