Merge "Fix instrumentation exit stub for arm64" into mnc-dev
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 0876499..d9a5ac6 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -67,10 +67,11 @@
const uint8_t* mapping_table, const uint8_t* vmap_table, const uint8_t* gc_map);
bool MakeExecutable(CompiledMethod* compiled_method, mirror::ArtMethod* method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
} // namespace jit
-
} // namespace art
#endif // ART_COMPILER_JIT_JIT_COMPILER_H_
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 8490afb..b4a45c6 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -691,6 +691,8 @@
include_cfi = false;
} else if (option == "--debuggable") {
debuggable = true;
+ include_debug_symbols = true;
+ include_cfi = true;
} else if (option.starts_with("--profile-file=")) {
profile_file_ = option.substr(strlen("--profile-file=")).data();
VLOG(compiler) << "dex2oat: profile file is " << profile_file_;
diff --git a/runtime/gc/accounting/bitmap.h b/runtime/gc/accounting/bitmap.h
index b294d49..eb00472 100644
--- a/runtime/gc/accounting/bitmap.h
+++ b/runtime/gc/accounting/bitmap.h
@@ -121,7 +121,7 @@
const size_t bitmap_size_;
private:
- DISALLOW_COPY_AND_ASSIGN(Bitmap);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Bitmap);
};
// One bit per kAlignment in range (start, end]
@@ -184,6 +184,8 @@
uintptr_t const cover_begin_;
uintptr_t const cover_end_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryRangeBitmap);
};
} // namespace accounting
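Many hunks in this change swap DISALLOW_COPY_AND_ASSIGN for DISALLOW_IMPLICIT_CONSTRUCTORS. For reference, a sketch of the two macros, assuming definitions along the lines of ART's base/macros.h (exact formatting may differ):

    // Removes only the copy constructor and copy assignment.
    #define DISALLOW_COPY_AND_ASSIGN(TypeName) \
      TypeName(const TypeName&) = delete;      \
      void operator=(const TypeName&) = delete

    // Additionally removes the default constructor, so the type can only be
    // created through its explicitly declared constructors.
    #define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
      TypeName() = delete;                           \
      DISALLOW_COPY_AND_ASSIGN(TypeName)

Classes below that keep plain DISALLOW_COPY_AND_ASSIGN (ReferenceProcessor, TaskProcessor, Jit, JitOptions) either declare their own default constructor or still need one.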
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 75ef58a..34e6aa3 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -146,6 +146,8 @@
// Card table doesn't begin at the beginning of the mem_map_, instead it is displaced by offset
// to allow the byte value of biased_begin_ to equal GC_CARD_DIRTY
const size_t offset_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(CardTable);
};
} // namespace accounting
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 93de035..60ea6b6 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -279,7 +279,7 @@
friend class FlipCallback;
friend class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor;
- DISALLOW_COPY_AND_ASSIGN(ConcurrentCopying);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};
} // namespace collector
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index c5a8d5d..9b76d1a 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -190,6 +190,9 @@
int64_t total_freed_bytes_;
CumulativeLogger cumulative_timings_;
mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(GarbageCollector);
};
} // namespace collector
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 4337644..f59a2cd 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -251,7 +251,7 @@
friend class UpdateReferenceVisitor;
friend class UpdateRootVisitor;
- DISALLOW_COPY_AND_ASSIGN(MarkCompact);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MarkCompact);
};
} // namespace collector
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index fad3403..7e1af7b 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -336,7 +336,7 @@
friend class VerifyRootMarkedVisitor;
friend class VerifyRootVisitor;
- DISALLOW_COPY_AND_ASSIGN(MarkSweep);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MarkSweep);
};
} // namespace collector
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index ac0d068..1a211cd 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -40,7 +40,7 @@
virtual void BindBitmaps() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- DISALLOW_COPY_AND_ASSIGN(PartialMarkSweep);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
};
} // namespace collector
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 61fbead..3c25f53 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -278,7 +278,7 @@
private:
friend class BitmapSetSlowPathVisitor;
- DISALLOW_COPY_AND_ASSIGN(SemiSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SemiSpace);
};
} // namespace collector
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index 4f9dabf..b9ef137 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -47,7 +47,7 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
private:
- DISALLOW_COPY_AND_ASSIGN(StickyMarkSweep);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep);
};
} // namespace collector
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 3e56205..fbf36e8 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -371,11 +371,8 @@
}
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
- : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
- if (kMeasureAllocationTime) {
- allocation_start_time_ = NanoTime() / kTimeAdjust;
- }
-}
+ : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr),
+ allocation_start_time_(kMeasureAllocationTime ? NanoTime() / kTimeAdjust : 0u) { }
inline Heap::AllocationTimer::~AllocationTimer() {
if (kMeasureAllocationTime) {
@@ -419,7 +416,7 @@
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
mirror::Object** obj) {
if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
- RequestConcurrentGCAndSaveObject(self, obj);
+ RequestConcurrentGCAndSaveObject(self, false, obj);
}
}
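Two things to note in the heap-inl.h hunks above. First, AllocationTimer now captures the start time in the constructor's initializer list rather than in the body; that is what allows allocation_start_time_ to become const in the heap.h hunk further down. A reduced, self-contained sketch of the pattern (NowNanosSketch is an assumed stand-in for ART's NanoTime()/kTimeAdjust arithmetic):

    #include <chrono>
    #include <cstdint>

    inline uint64_t NowNanosSketch() {
      return std::chrono::duration_cast<std::chrono::nanoseconds>(
                 std::chrono::steady_clock::now().time_since_epoch()).count();
    }

    class ScopedAllocTimerSketch {
     public:
      // When measurement is disabled the member is simply 0.
      explicit ScopedAllocTimerSketch(bool measure)
          : start_time_(measure ? NowNanosSketch() : 0u) {}
     private:
      const uint64_t start_time_;  // const: set once, in the initializer list
    };

Second, the allocation fast path in CheckConcurrentGC passes force_full = false, so background GCs triggered by ordinary allocation keep their previous (possibly sticky) behavior; see the heap.cc hunks below.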
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b80c4b6..cbbc76c 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -3325,20 +3325,24 @@
*object = soa.Decode<mirror::Object*>(arg.get());
}
-void Heap::RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj) {
+void Heap::RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj) {
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Object> wrapper(hs.NewHandleWrapper(obj));
- RequestConcurrentGC(self);
+ RequestConcurrentGC(self, force_full);
}
class Heap::ConcurrentGCTask : public HeapTask {
public:
- explicit ConcurrentGCTask(uint64_t target_time) : HeapTask(target_time) { }
+ explicit ConcurrentGCTask(uint64_t target_time, bool force_full)
+ : HeapTask(target_time), force_full_(force_full) { }
virtual void Run(Thread* self) OVERRIDE {
gc::Heap* heap = Runtime::Current()->GetHeap();
- heap->ConcurrentGC(self);
+ heap->ConcurrentGC(self, force_full_);
heap->ClearConcurrentGCRequest();
}
+
+ private:
+ const bool force_full_; // If true, force full (or partial) collection.
};
static bool CanAddHeapTask(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_) {
@@ -3351,24 +3355,30 @@
concurrent_gc_pending_.StoreRelaxed(false);
}
-void Heap::RequestConcurrentGC(Thread* self) {
+void Heap::RequestConcurrentGC(Thread* self, bool force_full) {
if (CanAddHeapTask(self) &&
concurrent_gc_pending_.CompareExchangeStrongSequentiallyConsistent(false, true)) {
- task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime())); // Start straight away.
+ task_processor_->AddTask(self, new ConcurrentGCTask(NanoTime(), // Start straight away.
+ force_full));
}
}
-void Heap::ConcurrentGC(Thread* self) {
+void Heap::ConcurrentGC(Thread* self, bool force_full) {
if (!Runtime::Current()->IsShuttingDown(self)) {
// Wait for any GCs currently running to finish.
if (WaitForGcToComplete(kGcCauseBackground, self) == collector::kGcTypeNone) {
// If we can't run the GC type we wanted to run, find the next appropriate one and try that
// instead. E.g. can't do partial, so do full instead.
- if (CollectGarbageInternal(next_gc_type_, kGcCauseBackground, false) ==
+ collector::GcType next_gc_type = next_gc_type_;
+ // If forcing full and next gc type is sticky, override with a non-sticky type.
+ if (force_full && next_gc_type == collector::kGcTypeSticky) {
+ next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ }
+ if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
collector::kGcTypeNone) {
for (collector::GcType gc_type : gc_plan_) {
// Attempt to run the collector, if we succeed, we are done.
- if (gc_type > next_gc_type_ &&
+ if (gc_type > next_gc_type &&
CollectGarbageInternal(gc_type, kGcCauseBackground, false) !=
collector::kGcTypeNone) {
break;
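The core of the change: when force_full is set and the next planned GC is sticky, Heap::ConcurrentGC upgrades it before collecting. A stand-alone sketch of just the selection logic (enum values assumed to mirror gc::collector::GcType):

    enum class GcTypeSketch { kSticky, kPartial, kFull };

    // Mirrors the override added to Heap::ConcurrentGC above: a sticky GC only
    // scans recently allocated objects, so a caller wanting real reclamation
    // upgrades to partial (zygote space present) or full.
    GcTypeSketch ChooseGcType(GcTypeSketch next, bool force_full, bool has_zygote_space) {
      if (force_full && next == GcTypeSketch::kSticky) {
        return has_zygote_space ? GcTypeSketch::kPartial : GcTypeSketch::kFull;
      }
      return next;
    }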
@@ -3553,7 +3563,7 @@
UpdateMaxNativeFootprint();
} else if (!IsGCRequestPending()) {
if (IsGcConcurrent()) {
- RequestConcurrentGC(self);
+ RequestConcurrentGC(self, true); // Request non-sticky type.
} else {
CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
}
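Also worth noting: RequestConcurrentGC above only enqueues a task when the concurrent_gc_pending_ compare-and-exchange flips false to true, so at most one ConcurrentGCTask is queued at a time (ClearConcurrentGCRequest resets the flag after the task runs). A minimal sketch with std::atomic in place of ART's Atomic<> wrapper:

    #include <atomic>

    std::atomic<bool> gc_pending_sketch{false};

    // Returns true for exactly one caller until the flag is cleared again.
    bool TryClaimGcRequest() {
      bool expected = false;
      return gc_pending_sketch.compare_exchange_strong(expected, true);
    }

    void ClearGcRequest() { gc_pending_sketch.store(false); }

The native-allocation path in the last hunk passes force_full = true, so a GC requested under native memory pressure will not be satisfied by a sticky collection.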
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 565687c..90249f9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -288,7 +288,7 @@
// Does a concurrent GC, should only be called by the GC daemon thread
// through runtime.
- void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
+ void ConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
// Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
// The boolean decides whether to use IsAssignableFrom or == when comparing classes.
@@ -664,7 +664,7 @@
void RequestTrim(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
// Request asynchronous GC.
- void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+ void RequestConcurrentGC(Thread* self, bool force_full) LOCKS_EXCLUDED(pending_task_lock_);
// Whether or not we may use a garbage collector, used so that we only create collectors we need.
bool MayUseCollector(CollectorType type) const;
@@ -786,7 +786,7 @@
void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
LOCKS_EXCLUDED(pending_task_lock_);
- void RequestConcurrentGCAndSaveObject(Thread* self, mirror::Object** obj)
+ void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsGCRequestPending() const;
@@ -1201,41 +1201,23 @@
friend class VerifyReferenceVisitor;
friend class VerifyObjectVisitor;
friend class ScopedHeapFill;
- friend class ScopedHeapLock;
friend class space::SpaceTest;
class AllocationTimer {
- private:
- Heap* heap_;
- mirror::Object** allocated_obj_ptr_;
- uint64_t allocation_start_time_;
public:
- AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
- ~AllocationTimer();
+ ALWAYS_INLINE AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
+ ALWAYS_INLINE ~AllocationTimer();
+ private:
+ Heap* const heap_;
+ mirror::Object** allocated_obj_ptr_;
+ const uint64_t allocation_start_time_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationTimer);
};
DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};
-// ScopedHeapFill changes the bytes allocated counter to be equal to the growth limit. This
-// causes the next allocation to perform a GC and possibly an OOM. It can be used to ensure that a
-// GC happens in specific methods such as ThrowIllegalMonitorStateExceptionF in Monitor::Wait.
-class ScopedHeapFill {
- public:
- explicit ScopedHeapFill(Heap* heap)
- : heap_(heap),
- delta_(heap_->GetMaxMemory() - heap_->GetBytesAllocated()) {
- heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(delta_);
- }
- ~ScopedHeapFill() {
- heap_->num_bytes_allocated_.FetchAndSubSequentiallyConsistent(delta_);
- }
-
- private:
- Heap* const heap_;
- const int64_t delta_;
-};
-
} // namespace gc
} // namespace art
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index c67fd98..a44319b 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -81,6 +81,9 @@
IsHeapReferenceMarkedCallback* is_marked_callback_;
MarkObjectCallback* mark_callback_;
void* arg_;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ProcessReferencesArgs);
};
bool SlowPathEnabled() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called by ProcessReferences.
@@ -105,6 +108,8 @@
ReferenceQueue finalizer_reference_queue_;
ReferenceQueue phantom_reference_queue_;
ReferenceQueue cleared_references_;
+
+ DISALLOW_COPY_AND_ASSIGN(ReferenceProcessor);
};
} // namespace gc
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index f7d89d0..c45be85 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -106,7 +106,7 @@
// GC types.
mirror::Reference* list_;
- DISALLOW_COPY_AND_ASSIGN(ReferenceQueue);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ReferenceQueue);
};
} // namespace gc
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index f2378d9..871ebac 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -187,7 +187,7 @@
private:
friend class art::gc::Heap;
- DISALLOW_COPY_AND_ASSIGN(Space);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Space);
};
std::ostream& operator<<(std::ostream& os, const Space& space);
@@ -337,7 +337,7 @@
uint8_t* limit_;
private:
- DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousSpace);
};
// A space where objects may be allocated higgledy-piggledy throughout virtual memory. Currently
@@ -366,7 +366,7 @@
std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;
private:
- DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DiscontinuousSpace);
};
class MemMapSpace : public ContinuousSpace {
@@ -400,7 +400,7 @@
std::unique_ptr<MemMap> mem_map_;
private:
- DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MemMapSpace);
};
// Used by the heap compaction interface to enable copying from one type of alloc space to another.
@@ -453,7 +453,7 @@
private:
friend class gc::Heap;
- DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ContinuousMemMapAllocSpace);
};
} // namespace space
diff --git a/runtime/gc/task_processor.h b/runtime/gc/task_processor.h
index 67e3a54..5f48619 100644
--- a/runtime/gc/task_processor.h
+++ b/runtime/gc/task_processor.h
@@ -46,6 +46,7 @@
uint64_t target_run_time_;
friend class TaskProcessor;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(HeapTask);
};
// Used to process GC tasks (heap trim, heap transitions, concurrent GC).
@@ -78,6 +79,8 @@
std::unique_ptr<ConditionVariable> cond_ GUARDED_BY(lock_);
std::multiset<HeapTask*, CompareByTargetRunTime> tasks_ GUARDED_BY(lock_);
Thread* running_thread_ GUARDED_BY(lock_);
+
+ DISALLOW_COPY_AND_ASSIGN(TaskProcessor);
};
} // namespace gc
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 3e80aef..f5ad8b8 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -86,6 +86,8 @@
std::unique_ptr<jit::JitInstrumentationCache> instrumentation_cache_;
std::unique_ptr<jit::JitCodeCache> code_cache_;
CompilerCallbacks* compiler_callbacks_; // Owned by the jit compiler.
+
+ DISALLOW_COPY_AND_ASSIGN(Jit);
};
class JitOptions {
@@ -114,8 +116,9 @@
bool dump_info_on_shutdown_;
JitOptions() : use_jit_(false), code_cache_capacity_(0), compile_threshold_(0),
- dump_info_on_shutdown_(false) {
- }
+ dump_info_on_shutdown_(false) { }
+
+ DISALLOW_COPY_AND_ASSIGN(JitOptions);
};
} // namespace jit
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index da891fe..8b76647 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -130,7 +130,7 @@
// required since we have to implement ClassLinker::GetQuickOatCodeFor for walking stacks.
SafeMap<mirror::ArtMethod*, const void*> method_code_map_ GUARDED_BY(lock_);
- DISALLOW_COPY_AND_ASSIGN(JitCodeCache);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};
diff --git a/runtime/jit/jit_instrumentation.cc b/runtime/jit/jit_instrumentation.cc
index 160e678..e2f9cec 100644
--- a/runtime/jit/jit_instrumentation.cc
+++ b/runtime/jit/jit_instrumentation.cc
@@ -47,6 +47,8 @@
private:
mirror::ArtMethod* const method_;
JitInstrumentationCache* const cache_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JitCompileTask);
};
JitInstrumentationCache::JitInstrumentationCache(size_t hot_method_threshold)
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 9d5d74f..72acaef 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -58,6 +58,8 @@
std::unordered_map<jmethodID, size_t> samples_;
size_t hot_method_threshold_;
std::unique_ptr<ThreadPool> thread_pool_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JitInstrumentationCache);
};
class JitInstrumentationListener : public instrumentation::InstrumentationListener {
@@ -97,6 +99,8 @@
private:
JitInstrumentationCache* const instrumentation_cache_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JitInstrumentationListener);
};
} // namespace jit
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index fc3826b..9bb08a2 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -2109,10 +2109,12 @@
m = c->FindVirtualMethod(name, sig);
}
if (m == nullptr) {
- c->DumpClass(LOG(ERROR), mirror::Class::kDumpClassFullDetail);
- LOG(return_errors ? ERROR : FATAL) << "Failed to register native method "
+ LOG(return_errors ? ERROR : INTERNAL_FATAL) << "Failed to register native method "
<< PrettyDescriptor(c) << "." << name << sig << " in "
<< c->GetDexCache()->GetLocation()->ToModifiedUtf8();
+ // Safe to pass in LOG(FATAL) since the log object aborts in its destructor and only
+ // goes out of scope after DumpClass has finished executing.
+ c->DumpClass(LOG(return_errors ? ERROR : FATAL), mirror::Class::kDumpClassFullDetail);
ThrowNoSuchMethodError(soa, c, name, sig, "static or non-static");
return JNI_ERR;
} else if (!m->IsNative()) {
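The jni_internal.cc reordering relies on C++ temporary lifetime: the message object produced by LOG(...) is destroyed, and a FATAL log aborts, only at the end of the full expression, i.e. after DumpClass has finished writing into it. The first message is downgraded to INTERNAL_FATAL so the process does not abort before the class dump is emitted. A toy sketch of the lifetime argument (hypothetical FatalLog type, not ART's logging API):

    #include <cstdlib>
    #include <iostream>

    // Toy stand-in for a LOG(FATAL) temporary: aborts in its destructor.
    struct FatalLog {
      std::ostream& stream() { return std::cerr; }
      ~FatalLog() { std::abort(); }
    };

    void DumpDetails(std::ostream& os) { os << "...full class dump...\n"; }

    void Fail() {
      // The FatalLog temporary lives until the ';', so DumpDetails runs
      // to completion before the abort in ~FatalLog.
      DumpDetails(FatalLog().stream());
    }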
diff --git a/runtime/linear_alloc.h b/runtime/linear_alloc.h
index 6d8eda6..fcabcc8 100644
--- a/runtime/linear_alloc.h
+++ b/runtime/linear_alloc.h
@@ -42,6 +42,8 @@
private:
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
ArenaAllocator allocator_ GUARDED_BY(lock_);
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(LinearAlloc);
};
} // namespace art
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 53bb129..9736e15 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -223,7 +223,7 @@
}
static void VMRuntime_concurrentGC(JNIEnv* env, jobject) {
- Runtime::Current()->GetHeap()->ConcurrentGC(ThreadForEnv(env));
+ Runtime::Current()->GetHeap()->ConcurrentGC(ThreadForEnv(env), true);
}
static void VMRuntime_requestHeapTrim(JNIEnv* env, jobject) {
@@ -231,7 +231,7 @@
}
static void VMRuntime_requestConcurrentGC(JNIEnv* env, jobject) {
- Runtime::Current()->GetHeap()->RequestConcurrentGC(ThreadForEnv(env));
+ Runtime::Current()->GetHeap()->RequestConcurrentGC(ThreadForEnv(env), true);
}
static void VMRuntime_startHeapTaskProcessor(JNIEnv* env, jobject) {
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index b93fcb4..99750a1 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -133,11 +133,7 @@
T AddLocalReference(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
- if (obj == nullptr) {
- return nullptr;
- }
- DCHECK_NE((reinterpret_cast<uintptr_t>(obj) & 0xffff0000), 0xebad0000);
- return Env()->AddLocalReference<T>(obj);
+ return obj == nullptr ? nullptr : Env()->AddLocalReference<T>(obj);
}
template<typename T>
diff --git a/runtime/utils.cc b/runtime/utils.cc
index e18af00..650214f 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -262,8 +262,8 @@
void NanoSleep(uint64_t ns) {
timespec tm;
- tm.tv_sec = 0;
- tm.tv_nsec = ns;
+ tm.tv_sec = ns / MsToNs(1000);
+ tm.tv_nsec = ns - static_cast<uint64_t>(tm.tv_sec) * MsToNs(1000);
nanosleep(&tm, nullptr);
}
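The NanoSleep fix matters because POSIX nanosleep() requires tv_nsec to be in [0, 999999999]; the old code stored the entire nanosecond count in tv_nsec, so any sleep of one second or longer failed with EINVAL and returned immediately. MsToNs(1000) is one second expressed in nanoseconds, making the new code the usual seconds/remainder split, sketched here with an explicit constant:

    #include <cstdint>
    #include <ctime>

    void NanoSleepSketch(uint64_t ns) {
      constexpr uint64_t kNanosPerSecond = 1000000000u;  // == MsToNs(1000)
      timespec tm;
      tm.tv_sec = static_cast<time_t>(ns / kNanosPerSecond);
      tm.tv_nsec = static_cast<long>(ns % kNanosPerSecond);  // always < 1e9
      nanosleep(&tm, nullptr);
    }

The TestSleep test added below exercises exactly this boundary: sleeping MsToNs(1500) crosses one second, and with the old code the immediate EINVAL return would make the elapsed time fail the MsToNs(1000) lower bound.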
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 259fe33..195de0c 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -515,4 +515,10 @@
EXPECT_FALSE(IsAbsoluteUint<32>(UINT_MAX_plus1));
}
+TEST_F(UtilsTest, TestSleep) {
+ auto start = NanoTime();
+ NanoSleep(MsToNs(1500));
+ EXPECT_GT(NanoTime() - start, MsToNs(1000));
+}
+
} // namespace art