Disable compaction for JNI workarounds.

Compaction can't work when JNI workarounds are enabled: the workaround
lets buggy apps treat jobject handles as raw object pointers rather than
indirect references, and a moving collector would leave those cached
pointers dangling. This change also includes some other refactoring.
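
A minimal standalone sketch of the fallback logic in
Heap::DisableCompaction() follows. The enum and starting values here are
simplified stand-ins for illustration, not the real runtime definitions,
and the snippet is not part of this patch:

  #include <cstdio>

  enum CollectorType { kCollectorTypeCMS, kCollectorTypeSS, kCollectorTypeGSS };

  // Semi-space style collectors move objects; CMS does not.
  static bool IsCompactingGC(CollectorType type) {
    return type == kCollectorTypeSS || type == kCollectorTypeGSS;
  }

  int main() {
    CollectorType post_zygote = kCollectorTypeSS;  // hypothetical config
    CollectorType background = kCollectorTypeGSS;  // hypothetical config
    // Mirror of the patch: downgrade any compacting choice to CMS, then
    // make the background collector follow the post-zygote one.
    if (IsCompactingGC(post_zygote)) post_zygote = kCollectorTypeCMS;
    if (IsCompactingGC(background)) background = post_zygote;
    std::printf("post-zygote=%d background=%d\n", post_zygote, background);
    return 0;
  }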

Change-Id: Ia7b0f2b39c79f5a0a5f50874d823b950ab02a0c3
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 87ee21b..1fcbe4d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -318,6 +318,16 @@
   }
 }
 
+void Heap::DisableCompaction() {
+  if (IsCompactingGC(post_zygote_collector_type_)) {
+    post_zygote_collector_type_ = kCollectorTypeCMS;
+  }
+  if (IsCompactingGC(background_collector_type_)) {
+    background_collector_type_ = post_zygote_collector_type_;
+  }
+  TransitionCollector(post_zygote_collector_type_);
+}
+
 std::string Heap::SafeGetClassDescriptor(mirror::Class* klass) {
   if (!IsValidContinuousSpaceObjectAddress(klass)) {
     return StringPrintf("<non heap address klass %p>", klass);
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 88adf81..a90af27 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -487,6 +487,9 @@
   // Assumes there is only one image space.
   space::ImageSpace* GetImageSpace() const;
 
+  // Permanently disable compaction.
+  void DisableCompaction();
+
   space::DlMallocSpace* GetDlMallocSpace() const {
     return dlmalloc_space_;
   }
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 54c7b6e..82b53f6 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -21,6 +21,7 @@
 #include "scoped_thread_state_change.h"
 #include "thread.h"
 #include "utils.h"
+#include "verify_object-inl.h"
 
 #include <cstdlib>
 
@@ -81,8 +82,7 @@
   size_t topIndex = segment_state_.parts.topIndex;
 
   CHECK(obj != NULL);
-  // TODO: stronger sanity check on the object (such as in heap)
-  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(obj), 8);
+  VerifyObject(obj);
   DCHECK(table_ != NULL);
   DCHECK_LE(alloc_entries_, max_entries_);
   DCHECK_GE(segment_state_.parts.numHoles, prevState.parts.numHoles);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index f48e8ad..8716556 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -177,6 +177,8 @@
           << targetSdkVersion << "...";
 
       vm->work_around_app_jni_bugs = true;
+      LOG(WARNING) << "Permanently disabling heap compaction due to jni workarounds";
+      Runtime::Current()->GetHeap()->DisableCompaction();
     }
   }
 }
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 0ad0190..a50fa00 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1196,7 +1196,7 @@
   // The "kinds" below are sorted by the frequency we expect to encounter them.
   if (kind == kLocal) {
     IndirectReferenceTable& locals = jni_env_->locals;
-    result = const_cast<mirror::Object*>(locals.Get(ref));
+    result = locals.Get(ref);
   } else if (kind == kSirtOrInvalid) {
     // TODO: make stack indirect reference table lookup more efficient.
     // Check if this is a local reference in the SIRT.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index d311945..7745a19 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -193,10 +193,10 @@
 
 size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
   Thread* self = Thread::Current();
-  if (kIsDebugBuild) {
-    Locks::mutator_lock_->AssertNotExclusiveHeld(self);
-    Locks::thread_list_lock_->AssertNotHeld(self);
-    Locks::thread_suspend_count_lock_->AssertNotHeld(self);
+  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
+  Locks::thread_list_lock_->AssertNotHeld(self);
+  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
+  if (kDebugLocking) {
     CHECK_NE(self->GetState(), kRunnable);
   }
 
@@ -273,26 +273,24 @@
 
   VLOG(threads) << *self << " SuspendAll starting...";
 
-  if (kIsDebugBuild) {
-    Locks::mutator_lock_->AssertNotHeld(self);
-    Locks::thread_list_lock_->AssertNotHeld(self);
-    Locks::thread_suspend_count_lock_->AssertNotHeld(self);
+  Locks::mutator_lock_->AssertNotHeld(self);
+  Locks::thread_list_lock_->AssertNotHeld(self);
+  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
+  if (kDebugLocking) {
     CHECK_NE(self->GetState(), kRunnable);
   }
   {
     MutexLock mu(self, *Locks::thread_list_lock_);
-    {
-      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
-      // Update global suspend all state for attaching threads.
-      ++suspend_all_count_;
-      // Increment everybody's suspend count (except our own).
-      for (const auto& thread : list_) {
-        if (thread == self) {
-          continue;
-        }
-        VLOG(threads) << "requesting thread suspend: " << *thread;
-        thread->ModifySuspendCount(self, +1, false);
+    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
+    // Update global suspend all state for attaching threads.
+    ++suspend_all_count_;
+    // Increment everybody's suspend count (except our own).
+    for (const auto& thread : list_) {
+      if (thread == self) {
+        continue;
       }
+      VLOG(threads) << "requesting thread suspend: " << *thread;
+      thread->ModifySuspendCount(self, +1, false);
     }
   }
 
@@ -306,8 +304,10 @@
   Locks::mutator_lock_->ExclusiveLock(self);
 #endif
 
-  // Debug check that all threads are suspended.
-  AssertThreadsAreSuspended(self, self);
+  if (kDebugLocking) {
+    // Debug check that all threads are suspended.
+    AssertThreadsAreSuspended(self, self);
+  }
 
   VLOG(threads) << *self << " SuspendAll complete";
 }
@@ -317,8 +317,10 @@
 
   VLOG(threads) << *self << " ResumeAll starting";
 
-  // Debug check that all threads are suspended.
-  AssertThreadsAreSuspended(self, self);
+  if (kDebugLocking) {
+    // Debug check that all threads are suspended.
+    AssertThreadsAreSuspended(self, self);
+  }
 
   Locks::mutator_lock_->ExclusiveUnlock(self);
   {