Add packing to class Thread.

Otherwise, code compiled on the host can compute different field offsets
for the same structure than the target uses.
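
For instance (a minimal standalone sketch with hypothetical field names,
not code from this change), padding moves a field that packing pins down:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Plain { uint8_t state; uint32_t suspend_count; };
    struct __attribute__ ((__packed__)) Packed { uint8_t state; uint32_t suspend_count; };

    int main() {
      // With default padding, suspend_count typically sits at offset 4;
      // packed, it is at offset 1 everywhere, host and target alike.
      printf("plain: %zu packed: %zu\n",
             offsetof(Plain, suspend_count), offsetof(Packed, suspend_count));
      return 0;
    }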

Also add commented-out code to start up the various GC daemon threads.
More Class native methods need to be implemented before that will work.

Change-Id: I618b647b92378eec1b25cee469c8cfccf42f21fd
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 33e59cc..b99a876 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -68,6 +68,8 @@
 	src/calling_convention_arm.cc \
 	src/calling_convention_x86.cc \
 	src/context.cc \
+	src/context_arm.cc.arm \
+	src/context_x86.cc \
 	src/check_jni.cc \
 	src/class_linker.cc \
 	src/class_loader.cc \
@@ -140,7 +142,6 @@
 
 LIBART_TARGET_SRC_FILES := \
 	$(LIBART_COMMON_SRC_FILES) \
-	src/context_arm.cc.arm \
 	src/logging_android.cc \
 	src/runtime_android.cc \
 	src/thread_android.cc \
@@ -148,7 +149,6 @@
 
 LIBART_HOST_SRC_FILES := \
 	$(LIBART_COMMON_SRC_FILES) \
-	src/context_x86.cc \
 	src/logging_linux.cc \
 	src/runtime_linux.cc \
 	src/thread_linux.cc \
diff --git a/src/context_arm.cc b/src/context_arm.cc
index 3c2af94..387b71d 100644
--- a/src/context_arm.cc
+++ b/src/context_arm.cc
@@ -32,6 +32,7 @@
 }
 
 void ArmContext::DoLongJump() {
+#if defined(__arm__)
   // TODO: Load all GPRs and FPRs, currently the code restores registers R4 to PC
   asm volatile ( "mov %%r0, %0\n"
                  "mov %%r1, %1\n"
@@ -50,6 +51,9 @@
         "w28"(fprs_[28]), "w29"(fprs_[29]), "w30"(fprs_[30]), "w31"(fprs_[31])
 #endif
       :);  // clobber
+#else
+  UNIMPLEMENTED(FATAL);
+#endif
 }
 
 }  // namespace arm
diff --git a/src/context_x86.cc b/src/context_x86.cc
index 2f328e1..43de4ba 100644
--- a/src/context_x86.cc
+++ b/src/context_x86.cc
@@ -2,16 +2,22 @@
 
 #include "context_x86.h"
 
+#include "logging.h"
+
 namespace art {
 namespace x86 {
 
 void X86Context::DoLongJump() {
+#if defined(__i386__)
   // Load ESP and EIP
   asm volatile ( "movl %%esp, %0\n"
                  "jmp *%1"
       : // output
       : "m"(esp_), "r"(&eip_)  // input
       :);  // clobber
+#else
+  UNIMPLEMENTED(FATAL);
+#endif
 }
 
 }  // namespace x86
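
Both context files now use the same guard: the architecture-specific inline
assembly compiles only on its own target, and any other build fails loudly
at runtime instead of failing to compile. A minimal standalone sketch of the
pattern (the fprintf/abort pair stands in for ART's UNIMPLEMENTED(FATAL)):

    #include <cstdio>
    #include <cstdlib>

    void DoLongJump() {
    #if defined(__i386__)
      asm volatile ("nop");  // arch-specific code path goes here
    #else
      // Stand-in for UNIMPLEMENTED(FATAL): fail loudly elsewhere.
      fprintf(stderr, "DoLongJump unimplemented on this architecture\n");
      abort();
    #endif
    }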
diff --git a/src/heap.cc b/src/heap.cc
index 4778611..094790a 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -124,6 +124,8 @@
     RecordImageAllocations(image_spaces[i]);
   }
 
+  Heap::EnableObjectValidation();
+
   // It's still too early to take a lock because there are no threads yet,
   // but we can create the heap lock now. We don't create it earlier to
   // make it clear that you can't use locks during heap initialization.
@@ -167,10 +169,13 @@
   return true;
 }
 
-bool Heap::verify_object_disabled_;
+bool Heap::verify_objects_ = false;
 
 #if VERIFY_OBJECT_ENABLED
 void Heap::VerifyObject(const Object* obj) {
+  if (!verify_objects_) {
+    return;
+  }
   ScopedHeapLock lock;
   Heap::VerifyObjectLocked(obj);
 }
@@ -178,7 +183,7 @@
 
 void Heap::VerifyObjectLocked(const Object* obj) {
   lock_->AssertHeld();
-  if (obj != NULL && !verify_object_disabled_) {
+  if (obj != NULL) {
     if (!IsAligned(obj, kObjectAlignment)) {
       LOG(FATAL) << "Object isn't aligned: " << obj;
     } else if (!live_bitmap_->Test(obj)) {
diff --git a/src/heap.h b/src/heap.h
index e95cfcc..e12ef4a 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -143,9 +143,12 @@
     return finalizer_reference_zombie_offset_;
   }
 
+  static void EnableObjectValidation() {
+    verify_objects_ = true;
+  }
+
   static void DisableObjectValidation() {
-    // TODO: remove this hack necessary for image writing
-    verify_object_disabled_ = true;
+    verify_objects_ = false;
   }
 
   // Callers must hold the heap lock.
@@ -220,7 +223,7 @@
   // offset of java.lang.ref.FinalizerReference.zombie
   static MemberOffset finalizer_reference_zombie_offset_;
 
-  static bool verify_object_disabled_;
+  static bool verify_objects_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
 };
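
The renamed flag also inverts the old logic: rather than verification that
is on until image writing disables it, verification is now off until
Heap::Init explicitly enables it, so nothing is verified during early
startup. The shape of the gate, as a hypothetical standalone sketch:

    #include <cassert>

    class Checker {
     public:
      static void Enable() { enabled_ = true; }
      static void Disable() { enabled_ = false; }
      static void Verify(const void* obj) {
        if (!enabled_) {
          return;  // startup or image writing: skip the check entirely
        }
        assert(obj != nullptr);  // stand-in for the alignment/bitmap checks
      }
     private:
      static bool enabled_;
    };

    bool Checker::enabled_ = false;  // off by default, like verify_objects_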
diff --git a/src/jni_internal.h b/src/jni_internal.h
index a0602e5..38e38fc 100644
--- a/src/jni_internal.h
+++ b/src/jni_internal.h
@@ -104,7 +104,7 @@
   JNIEnvExt(Thread* self, JavaVMExt* vm);
   ~JNIEnvExt();
 
-  Thread* self;
+  Thread* const self;
   JavaVMExt* vm;
 
   // Frequently-accessed fields cached from JavaVM.
diff --git a/src/macros.h b/src/macros.h
index 36afd27..9c5124b 100644
--- a/src/macros.h
+++ b/src/macros.h
@@ -123,4 +123,6 @@
 #define OFFSETOF_VOLATILE_MEMBER(t, f) \
   (reinterpret_cast<volatile char*>(&reinterpret_cast<t*>(16)->f) - reinterpret_cast<volatile char*>(16))
 
+#define PACKED __attribute__ ((__packed__))
+
 #endif  // ART_SRC_MACROS_H_
diff --git a/src/monitor.cc b/src/monitor.cc
index 1a5e6c9..cc74838 100644
--- a/src/monitor.cc
+++ b/src/monitor.cc
@@ -511,7 +511,7 @@
     self->SetState(Thread::kWaiting);
   }
 
-  self->wait_mutex_.Lock();
+  self->wait_mutex_->Lock();
 
   /*
    * Set wait_monitor_ to the monitor object we will be waiting on.
@@ -529,7 +529,7 @@
   if (self->interrupted_) {
     wasInterrupted = true;
     self->wait_monitor_ = NULL;
-    self->wait_mutex_.Unlock();
+    self->wait_mutex_->Unlock();
     goto done;
   }
 
@@ -540,9 +540,9 @@
   lock_.Unlock();
 
   if (!timed) {
-    self->wait_cond_.Wait(self->wait_mutex_);
+    self->wait_cond_->Wait(*self->wait_mutex_);
   } else {
-    self->wait_cond_.TimedWait(self->wait_mutex_, ts);
+    self->wait_cond_->TimedWait(*self->wait_mutex_, ts);
   }
   if (self->interrupted_) {
     wasInterrupted = true;
@@ -550,7 +550,7 @@
 
   self->interrupted_ = false;
   self->wait_monitor_ = NULL;
-  self->wait_mutex_.Unlock();
+  self->wait_mutex_->Unlock();
 
   // Reacquire the monitor lock.
   Lock(self);
@@ -601,9 +601,9 @@
     thread->wait_next_ = NULL;
 
     // Check to see if the thread is still waiting.
-    MutexLock mu(thread->wait_mutex_);
+    MutexLock mu(*thread->wait_mutex_);
     if (thread->wait_monitor_ != NULL) {
-      thread->wait_cond_.Signal();
+      thread->wait_cond_->Signal();
       return;
     }
   }
diff --git a/src/object.h b/src/object.h
index 02c98d8..8addcc6 100644
--- a/src/object.h
+++ b/src/object.h
@@ -185,7 +185,7 @@
 
 // Classes shared with the managed side of the world need to be packed
 // so that they don't have extra platform specific padding.
-#define MANAGED __attribute__ ((__packed__))
+#define MANAGED PACKED
 
 // C++ mirror of java.lang.Object
 class MANAGED Object {
diff --git a/src/runtime.cc b/src/runtime.cc
index d82f255..e0a0b20 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -187,13 +187,9 @@
   parsed->check_jni_ = false;
 #else
   // ...but on by default in debug builds.
-#if 0 // TODO: disabled for oatexec until the shorty's used by check_jni are managed heap allocated.
-      // Instead we turn on -Xcheck_jni in common_test.
   parsed->check_jni_ = true;
-#else
-  parsed->check_jni_ = false;
 #endif
-#endif
+
   parsed->heap_initial_size_ = Heap::kInitialSize;
   parsed->heap_maximum_size_ = Heap::kMaximumSize;
   parsed->stack_size_ = Thread::kDefaultStackSize;
@@ -338,6 +334,8 @@
     CreateClassPath(parsed->class_path_string_, parsed->class_path_);
   }
 
+  LOG(INFO) << "CheckJNI is " << (parsed->check_jni_ ? "on" : "off");
+
   return parsed.release();
 }
 
@@ -357,11 +355,25 @@
 void Runtime::Start() {
   started_ = true;
 
+  // Initialize both the built-in and libcore native methods.
+  InitLibraries();
+
   // Finish attaching the main thread.
-  Thread* main_thread = Thread::Current();
-  instance_->InitLibraries();
-  main_thread->CreatePeer("main", false);
-  instance_->signal_catcher_ = new SignalCatcher;
+  Thread::Current()->CreatePeer("main", false);
+
+  StartDaemonThreads();
+}
+
+void Runtime::StartDaemonThreads() {
+  signal_catcher_ = new SignalCatcher;
+
+  Class* c = class_linker_->FindSystemClass("Ljava/lang/Daemons;");
+  CHECK(c != NULL);
+  Method* m = c->FindDirectMethod("start", "()V");
+  CHECK(m != NULL);
+//  m->Invoke(Thread::Current(), NULL, NULL, NULL);
+
+  signal_catcher_->HandleSigQuit();
 }
 
 bool Runtime::IsStarted() {
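
StartDaemonThreads drives the call through the runtime's internal
ClassLinker/Method API; for comparison, the same call through plain JNI
would look roughly like this (a hypothetical sketch, not part of this
change):

    #include <jni.h>

    // Equivalent of FindSystemClass + FindDirectMethod + Invoke above.
    void StartDaemons(JNIEnv* env) {
      jclass daemons = env->FindClass("java/lang/Daemons");
      jmethodID start = env->GetStaticMethodID(daemons, "start", "()V");
      env->CallStaticVoidMethod(daemons, start);
    }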
diff --git a/src/runtime.h b/src/runtime.h
index 084e49c..35f8ef7 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -163,6 +163,7 @@
   bool Init(const Options& options, bool ignore_unrecognized);
   void InitLibraries();
   void RegisterRuntimeNativeMethods(JNIEnv*);
+  void StartDaemonThreads();
 
   std::string boot_class_path_;
   std::string class_path_;
diff --git a/src/thread.cc b/src/thread.cc
index 49c9605..83c1b81 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -830,8 +830,8 @@
 
 Thread::Thread()
     : peer_(NULL),
-      wait_mutex_("Thread wait mutex"),
-      wait_cond_("Thread wait condition variable"),
+      wait_mutex_(new Mutex("Thread wait mutex")),
+      wait_cond_(new ConditionVariable("Thread wait condition variable")),
       wait_monitor_(NULL),
       interrupted_(false),
       stack_end_(NULL),
@@ -842,7 +842,8 @@
       state_(Thread::kUnknown),
       exception_(NULL),
       suspend_count_(0),
-      class_loader_override_(NULL) {
+      class_loader_override_(NULL),
+      long_jump_context_(NULL) {
 }
 
 void MonitorExitVisitor(const Object* object, void*) {
@@ -896,6 +897,11 @@
   jni_env_ = NULL;
 
   SetState(Thread::kTerminated);
+
+  delete wait_cond_;
+  delete wait_mutex_;
+
+  delete long_jump_context_;
 }
 
 size_t Thread::NumSirtReferences() {
@@ -1235,10 +1241,10 @@
 }
 
 Context* Thread::GetLongJumpContext() {
-  Context* result = long_jump_context_.get();
+  Context* result = long_jump_context_;
   if (result == NULL) {
     result = Context::Create();
-    long_jump_context_.reset(result);
+    long_jump_context_ = result;
   }
   return result;
 }
diff --git a/src/thread.h b/src/thread.h
index ef2fa12..860a185 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -102,7 +102,7 @@
 };
 
 // Iterator over managed frames up to the first native-to-managed transition
-class Frame {
+class PACKED Frame {
  public:
   Frame() : sp_(NULL) {}
 
@@ -138,7 +138,7 @@
   Method** sp_;
 };
 
-class Thread {
+class PACKED Thread {
  public:
   /* thread priorities, from java.lang.Thread */
   enum Priority {
@@ -390,7 +390,7 @@
 
   // Implements java.lang.Thread.interrupted.
   bool Interrupted() {
-    MutexLock mu(wait_mutex_);
+    MutexLock mu(*wait_mutex_);
     bool interrupted = interrupted_;
     interrupted_ = false;
     return interrupted;
@@ -398,12 +398,12 @@
 
   // Implements java.lang.Thread.isInterrupted.
   bool IsInterrupted() {
-    MutexLock mu(wait_mutex_);
+    MutexLock mu(*wait_mutex_);
     return interrupted_;
   }
 
   void Interrupt() {
-    MutexLock mu(wait_mutex_);
+    MutexLock mu(*wait_mutex_);
     if (interrupted_) {
       return;
     }
@@ -412,7 +412,7 @@
   }
 
   void Notify() {
-    MutexLock mu(wait_mutex_);
+    MutexLock mu(*wait_mutex_);
     NotifyLocked();
   }
 
@@ -535,7 +535,7 @@
 
   void NotifyLocked() {
     if (wait_monitor_ != NULL) {
-      wait_cond_.Signal();
+      wait_cond_->Signal();
     }
   }
 
@@ -562,8 +562,8 @@
   Object* peer_;
 
   // Guards the 'interrupted_' and 'wait_monitor_' members.
-  mutable Mutex wait_mutex_;
-  ConditionVariable wait_cond_;
+  mutable Mutex* wait_mutex_;
+  ConditionVariable* wait_cond_;
   // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
   Monitor* wait_monitor_;
   // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
@@ -619,7 +619,7 @@
   const ClassLoader* class_loader_override_;
 
   // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
-  UniquePtr<Context> long_jump_context_;
+  Context* long_jump_context_;
 
   // TLS key used to retrieve the VM thread object.
   static pthread_key_t pthread_key_self_;
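
The switch from by-value members (and UniquePtr) to plain pointers in
Thread is presumably forced by the packing: fields of a packed class can
lose their natural alignment and taking their address is unsafe, whereas a
pointer field keeps the pointee aligned on the heap (hence the explicit
deletes added to ~Thread above). A hypothetical standalone sketch:

    #include <cstdint>
    #include <mutex>

    struct __attribute__ ((__packed__)) PackedThread {
      uint8_t flags;
      std::mutex* wait_mutex;  // pointee stays naturally aligned on the heap
    };

    int main() {
      PackedThread t{0, new std::mutex};
      t.wait_mutex->lock();    // safe: only the pointer is packed
      t.wait_mutex->unlock();
      delete t.wait_mutex;     // manual ownership, as in ~Thread
      return 0;
    }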