Merge "Compiler set up of InitializedStaticStorage" into dalvik-dev
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 1ab48f6..33e59cc 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -115,6 +115,7 @@
 	src/managed_register_arm.cc \
 	src/managed_register_x86.cc \
 	src/memory_region.cc \
+	src/monitor.cc \
 	src/mspace.c \
 	src/mutex.cc \
 	src/object.cc \
diff --git a/src/check_jni.cc b/src/check_jni.cc
index 61be30e..73c35b5 100644
--- a/src/check_jni.cc
+++ b/src/check_jni.cc
@@ -375,7 +375,7 @@
           return;
         } else {
           if (!obj->InstanceOf(field_type)) {
-            LOG(ERROR) << "JNI ERROR: attempt to set field " << PrettyField(f) << " with value of wrong type: " << PrettyType(obj);
+            LOG(ERROR) << "JNI ERROR: attempt to set field " << PrettyField(f) << " with value of wrong type: " << PrettyTypeOf(obj);
             JniAbort();
             return;
           }
@@ -419,7 +419,7 @@
     DCHECK(f_type != NULL);
     Class* c = o->GetClass();
     if (c->FindInstanceField(f->GetName()->ToModifiedUtf8(), f_type) == NULL) {
-      LOG(ERROR) << "JNI ERROR: jfieldID " << PrettyField(f) << " not valid for an object of class " << PrettyType(o);
+      LOG(ERROR) << "JNI ERROR: jfieldID " << PrettyField(f) << " not valid for an object of class " << PrettyTypeOf(o);
       JniAbort();
     }
   }
@@ -464,7 +464,7 @@
     Class* c = Decode<Class*>(ts, java_class);
     const Field* f = DecodeField(fid);
     if (f->GetDeclaringClass() != c) {
-      LOG(ERROR) << "JNI ERROR: static jfieldID " << fid << " not valid for class " << PrettyDescriptor(c->GetDescriptor());
+      LOG(ERROR) << "JNI ERROR: static jfieldID " << fid << " not valid for class " << PrettyClass(c);
       JniAbort();
     }
   }
@@ -483,7 +483,7 @@
     Class* c = Decode<Class*>(ts, java_class);
     const Method* m = DecodeMethod(mid);
     if (!c->IsAssignableFrom(m->GetDeclaringClass())) {
-      LOG(ERROR) << "JNI ERROR: can't call static " << PrettyMethod(m) << " on class " << PrettyDescriptor(c->GetDescriptor());
+      LOG(ERROR) << "JNI ERROR: can't call static " << PrettyMethod(m) << " on class " << PrettyClass(c);
       JniAbort();
     }
   }
@@ -500,7 +500,7 @@
     Object* o = Decode<Object*>(ts, java_object);
     const Method* m = DecodeMethod(mid);
     if (!o->InstanceOf(m->GetDeclaringClass())) {
-      LOG(ERROR) << "JNI ERROR: can't call " << PrettyMethod(m) << " on instance of " << PrettyType(o);
+      LOG(ERROR) << "JNI ERROR: can't call " << PrettyMethod(m) << " on instance of " << PrettyTypeOf(o);
       JniAbort();
     }
   }
@@ -608,7 +608,7 @@
           } else if (c == kInvalidIndirectRefObject || !Heap::IsHeapAddress(c)) {
             StringAppendF(&msg, "%p(INVALID)", jc);
           } else {
-            msg += PrettyDescriptor(c->GetDescriptor());
+            msg += PrettyClass(c);
             if (!entry) {
               StringAppendF(&msg, " (%p)", jc);
             }
@@ -755,7 +755,7 @@
       LOG(ERROR) << "JNI ERROR: jarray is an invalid " << GetIndirectRefKind(java_array) << ": " << reinterpret_cast<void*>(java_array);
       JniAbort();
     } else if (!a->IsArrayInstance()) {
-      LOG(ERROR) << "JNI ERROR: jarray argument has non-array type: " << PrettyType(a);
+      LOG(ERROR) << "JNI ERROR: jarray argument has non-array type: " << PrettyTypeOf(a);
       JniAbort();
     }
   }
@@ -952,7 +952,7 @@
       break;
     }
     if (!okay) {
-      LOG(ERROR) << "JNI ERROR: " << what << " has wrong type: " << PrettyType(obj);
+      LOG(ERROR) << "JNI ERROR: " << what << " has wrong type: " << PrettyTypeOf(obj);
       JniAbort();
     }
   }
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 232390a..e3b4eea 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -7,7 +7,6 @@
 #include <utility>
 #include <vector>
 
-#include "UniquePtr.h"
 #include "casts.h"
 #include "class_loader.h"
 #include "dex_cache.h"
@@ -21,6 +20,7 @@
 #include "runtime.h"
 #include "space.h"
 #include "thread.h"
+#include "UniquePtr.h"
 #include "utils.h"
 
 namespace art {
@@ -56,6 +56,35 @@
   "[Ljava/lang/StackTraceElement;",
 };
 
+class ObjectLock {
+ public:
+  explicit ObjectLock(Object* object) : self_(Thread::Current()), obj_(object) {
+    CHECK(object != NULL);
+    obj_->MonitorEnter(self_);
+  }
+
+  ~ObjectLock() {
+    obj_->MonitorExit(self_);
+  }
+
+  void Wait() {
+    return Monitor::Wait(self_, obj_, 0, 0, false);
+  }
+
+  void Notify() {
+    obj_->Notify();
+  }
+
+  void NotifyAll() {
+    obj_->NotifyAll();
+  }
+
+ private:
+  Thread* self_;
+  Object* obj_;
+  DISALLOW_COPY_AND_ASSIGN(ObjectLock);
+};
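(Editor's note: ObjectLock is a small RAII guard over the new MonitorEnter/MonitorExit interface: construction enters the object's monitor on the current thread, destruction exits it, and Wait/Notify/NotifyAll forward to the monitor while it is held. A hypothetical call-site sketch, not part of this change, of how a class-initialization wait might look:

    {
      ObjectLock lock(klass);  // klass->MonitorEnter(Thread::Current())
      while (klass->GetStatus() == Class::kStatusInitializing) {
        lock.Wait();           // releases the monitor, blocks, then reacquires it
      }
    }                          // ~ObjectLock calls klass->MonitorExit(...)
)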
+
 ClassLinker* ClassLinker::Create(const std::vector<const DexFile*>& boot_class_path,
                                  const std::vector<const DexFile*>& class_path,
                                  InternTable* intern_table, bool image) {
@@ -1193,7 +1222,7 @@
       klass->GetStatus() == Class::kStatusVerified ||
       klass->GetStatus() == Class::kStatusInitializing ||
       klass->GetStatus() == Class::kStatusError)
-          << PrettyDescriptor(klass->GetDescriptor()) << " is " << klass->GetStatus();
+          << PrettyClass(klass) << " is " << klass->GetStatus();
 
   Thread* self = Thread::Current();
 
@@ -1387,9 +1416,10 @@
     Class* super_class = klass->GetSuperClass();
     if (super_class->GetStatus() != Class::kStatusInitialized) {
       CHECK(!super_class->IsInterface());
-      klass->MonitorExit();
+      Thread* self = Thread::Current();
+      klass->MonitorEnter(self);
       bool super_initialized = InitializeClass(super_class);
-      klass->MonitorEnter();
+      klass->MonitorExit(self);
       // TODO: check for a pending exception
       if (!super_initialized) {
         klass->SetStatus(Class::kStatusError);
@@ -1407,10 +1437,11 @@
     return true;
   }
 
-  c->MonitorExit();
+  Thread* self = Thread::Current();
+  c->MonitorEnter(self);
   InitializeClass(c);
-  c->MonitorEnter();
-  return !Thread::Current()->IsExceptionPending();
+  c->MonitorExit(self);
+  return !self->IsExceptionPending();
 }
 
 StaticStorageBase* ClassLinker::InitializeStaticStorageFromCode(uint32_t type_idx,
diff --git a/src/compiler/Dalvik.h b/src/compiler/Dalvik.h
index f4f94e2..d57b72e 100644
--- a/src/compiler/Dalvik.h
+++ b/src/compiler/Dalvik.h
@@ -20,16 +20,18 @@
 #ifndef DALVIK_COMMON_H_
 #define DALVIK_COMMON_H_
 
+#include <assert.h>
 #include <stdbool.h>
 #include <stdint.h>
 #include <stdio.h>
-#include <assert.h>
-#include "logging.h"
-#include "object.h"
-#include "thread.h"
+
 #include "class_linker.h"
 #include "compiler.h"
 #include "dex_cache.h"
+#include "logging.h"
+#include "monitor.h"
+#include "object.h"
+#include "thread.h"
 #include "utils.h"
 
 // From Common.h
diff --git a/src/compiler/codegen/arm/Thumb2/Gen.cc b/src/compiler/codegen/arm/Thumb2/Gen.cc
index 2f63085..59aadcf 100644
--- a/src/compiler/codegen/arm/Thumb2/Gen.cc
+++ b/src/compiler/codegen/arm/Thumb2/Gen.cc
@@ -922,20 +922,18 @@
     ArmLIR* hopBranch;
 
     oatFlushAllRegs(cUnit);
-    assert(art::Monitor::kLwShapeThin == 0);
+    DCHECK_EQ(LW_SHAPE_THIN, 0);
     loadValueDirectFixed(cUnit, rlSrc, r1);  // Get obj
     oatLockCallTemps(cUnit);  // Prepare for explicit register usage
     genNullCheck(cUnit, rlSrc.sRegLow, r1, mir);
-    loadWordDisp(cUnit, rSELF, Thread::IdOffset().Int32Value(), r3);
+    loadWordDisp(cUnit, rSELF, Thread::ThinLockIdOffset().Int32Value(), r3);
     newLIR3(cUnit, kThumb2Ldrex, r2, r1,
             Object::MonitorOffset().Int32Value() >> 2); // Get object->lock
     // Align owner
-    opRegImm(cUnit, kOpLsl, r3, art::Monitor::kLwLockOwnerShift);
+    opRegImm(cUnit, kOpLsl, r3, LW_LOCK_OWNER_SHIFT);
     // Is lock unheld on lock or held by us (==threadId) on unlock?
-    newLIR4(cUnit, kThumb2Bfi, r3, r2, 0, art::Monitor::kLwLockOwnerShift
-            - 1);
-    newLIR3(cUnit, kThumb2Bfc, r2, art::Monitor::kLwHashStateShift,
-            art::Monitor::kLwLockOwnerShift - 1);
+    newLIR4(cUnit, kThumb2Bfi, r3, r2, 0, LW_LOCK_OWNER_SHIFT - 1);
+    newLIR3(cUnit, kThumb2Bfc, r2, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
     hopBranch = newLIR2(cUnit, kThumb2Cbnz, r2, 0);
     newLIR4(cUnit, kThumb2Strex, r2, r3, r1,
             Object::MonitorOffset().Int32Value() >> 2);
@@ -972,20 +970,18 @@
     ArmLIR* hopTarget;
     ArmLIR* hopBranch;
 
-    assert(art::Monitor::kLwShapeThin == 0);
+    DCHECK_EQ(LW_SHAPE_THIN, 0);
     oatFlushAllRegs(cUnit);
     loadValueDirectFixed(cUnit, rlSrc, r1);  // Get obj
     oatLockCallTemps(cUnit);  // Prepare for explicit register usage
     genNullCheck(cUnit, rlSrc.sRegLow, r1, mir);
     loadWordDisp(cUnit, r1, Object::MonitorOffset().Int32Value(), r2); // Get lock
-    loadWordDisp(cUnit, rSELF, Thread::IdOffset().Int32Value(), r3);
+    loadWordDisp(cUnit, rSELF, Thread::ThinLockIdOffset().Int32Value(), r3);
     // Is lock unheld on lock or held by us (==threadId) on unlock?
-    opRegRegImm(cUnit, kOpAnd, r12, r2, (art::Monitor::kLwHashStateMask <<
-                art::Monitor::kLwHashStateShift));
+    opRegRegImm(cUnit, kOpAnd, r12, r2, (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
     // Align owner
-    opRegImm(cUnit, kOpLsl, r3, art::Monitor::kLwLockOwnerShift);
-    newLIR3(cUnit, kThumb2Bfc, r2, art::Monitor::kLwHashStateShift,
-            art::Monitor::kLwLockOwnerShift - 1);
+    opRegImm(cUnit, kOpLsl, r3, LW_LOCK_OWNER_SHIFT);
+    newLIR3(cUnit, kThumb2Bfc, r2, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
     opRegReg(cUnit, kOpSub, r2, r3);
     hopBranch = opCondBranch(cUnit, kArmCondNe);
     oatGenMemBarrier(cUnit, kSY);
diff --git a/src/compiler_test.cc b/src/compiler_test.cc
index 5207561..712a3a0 100644
--- a/src/compiler_test.cc
+++ b/src/compiler_test.cc
@@ -358,9 +358,4 @@
   AssertStaticIntMethod(20664, LoadDex("Invoke"), "Invoke", "test0", "(I)I", 912);
 }
 
-TEST_F(CompilerTest, DISABLED_LARGE_SystemMethodsTest) {
-  CompileAll(NULL); // This test calls a bunch of stuff from libcore.
-  AssertStaticIntMethod(123, LoadDex("SystemMethods"), "SystemMethods", "test5", "()I");
-}
-
 }  // namespace art
diff --git a/src/dex_verifier.cc b/src/dex_verifier.cc
index b6739f8..b15e7bd 100644
--- a/src/dex_verifier.cc
+++ b/src/dex_verifier.cc
@@ -316,7 +316,6 @@
   const DexFile::CodeItem* code_item = vdata->code_item_;
   uint16_t registers_size = code_item->registers_size_;
   uint32_t insns_size = code_item->insns_size_;
-  bool generate_register_map = true;
   RegisterTable reg_table;
 
   if (registers_size * insns_size > 4*1024*1024) {
@@ -325,8 +324,7 @@
   }
 
   /* Create and initialize register lists. */
-  if (!InitRegisterTable(vdata, &reg_table,
-      generate_register_map ? kTrackRegsGcPoints : kTrackRegsBranches)) {
+  if (!InitRegisterTable(vdata, &reg_table, kTrackRegsGcPoints)) {
     return false;
   }
 
@@ -349,22 +347,16 @@
     return false;
   }
 
-  /* Generate a register map. */
-  if (generate_register_map) {
-    UniquePtr<RegisterMap> map(GenerateRegisterMapV(vdata));
-    /*
-     * Tuck the map into the Method. It will either get used directly or, if
-     * we're in dexopt, will be packed up and appended to the DEX file.
-     */
-    ByteArray* header = ByteArray::Alloc(sizeof(RegisterMapHeader));
-    ByteArray* data = ByteArray::Alloc(ComputeRegisterMapSize(map.get()));
+  /* Generate a register map and add it to the method. */
+  UniquePtr<RegisterMap> map(GenerateRegisterMapV(vdata));
+  ByteArray* header = ByteArray::Alloc(sizeof(RegisterMapHeader));
+  ByteArray* data = ByteArray::Alloc(ComputeRegisterMapSize(map.get()));
 
-    memcpy(header->GetData(), map.get()->header_, sizeof(RegisterMapHeader));
-    memcpy(data->GetData(), map.get()->data_, ComputeRegisterMapSize(map.get()));
+  memcpy(header->GetData(), map.get()->header_, sizeof(RegisterMapHeader));
+  memcpy(data->GetData(), map.get()->data_, ComputeRegisterMapSize(map.get()));
 
-    method->SetRegisterMapHeader(header);
-    method->SetRegisterMapData(data);
-  }
+  method->SetRegisterMapHeader(header);
+  method->SetRegisterMapData(data);
 
   return true;
 }
@@ -1610,7 +1602,6 @@
           break;
         if (res_class != NULL) {
           if (!decl_class->IsInterface() &&
-              //!res_class->InstanceOf(decl_class)) {
               !decl_class->IsAssignableFrom(res_class)) {
             LOG(ERROR) << "VFY: returning " << std::hex
                        << res_class->GetDescriptor()->ToModifiedUtf8()
@@ -3446,9 +3437,7 @@
     SetRegisterType(work_line, reg + 1, kRegTypeUnknown);
   }
 
-  /*
-   * Handle "continue". Tag the next consecutive instruction.
-   */
+  /* Handle "continue". Tag the next consecutive instruction. */
   if ((opcode_flag & Instruction::kContinue) != 0) {
     size_t insn_width = InsnGetWidth(insn_flags, insn_idx);
     if (insn_idx + insn_width >= insns_size) {
@@ -5305,7 +5294,7 @@
   uint32_t data_size = gc_point_count * (bytes_for_addr + reg_width);
 
   RegisterMap* map = new RegisterMap(format, reg_width, gc_point_count,
-      true, data_size);
+      data_size);
 
   /* Populate it. */
   uint8_t* map_data = map->data_;
@@ -5371,6 +5360,121 @@
   return map;
 }
 
+DexVerifier::RegisterMap* DexVerifier::GetExpandedRegisterMapHelper(
+    Method* method, RegisterMap* map) {
+  RegisterMap* new_map;
+
+  if (map == NULL)
+    return NULL;
+
+  /* TODO: sanity check to ensure this isn't called w/o external locking */
+
+  uint8_t format = map->header_->format_;
+  switch (format) {
+    case kRegMapFormatCompact8:
+    case kRegMapFormatCompact16:
+      /* already expanded */
+      return map;
+    case kRegMapFormatDifferential:
+      new_map = UncompressMapDifferential(map);
+      break;
+    default:
+      LOG(ERROR) << "Unknown format " << format
+                 << " in dvmGetExpandedRegisterMap";
+      return NULL;
+  }
+
+  if (new_map == NULL) {
+    LOG(ERROR) << "Map failed to uncompress (fmt=" << format << ") "
+               << method->GetDeclaringClass()->GetDescriptor()->ToModifiedUtf8()
+               << "." << method->GetName();
+    return NULL;
+  }
+
+  /* Update method, and free compressed map if it was sitting on the heap. */
+  ByteArray* header = ByteArray::Alloc(sizeof(RegisterMapHeader));
+  ByteArray* data = ByteArray::Alloc(ComputeRegisterMapSize(map));
+
+  memcpy(header->GetData(), map->header_, sizeof(RegisterMapHeader));
+  memcpy(data->GetData(), map->data_, ComputeRegisterMapSize(map));
+
+  method->SetRegisterMapHeader(header);
+  method->SetRegisterMapData(data);
+
+  delete map;
+  return new_map;
+}
+
+const uint8_t* DexVerifier::RegisterMapGetLine(const RegisterMap* map, int addr) {
+  int addr_width, line_width;
+  uint8_t format = map->header_->format_;
+  uint16_t num_entries = map->header_->num_entries_;
+
+  assert(num_entries > 0);
+
+  switch (format) {
+    case kRegMapFormatNone:
+      return NULL;
+    case kRegMapFormatCompact8:
+      addr_width = 1;
+      break;
+    case kRegMapFormatCompact16:
+      addr_width = 2;
+      break;
+    default:
+      LOG(ERROR) << "Unknown format " << format;
+      return NULL;
+  }
+
+  line_width = addr_width + map->header_->reg_width_;
+
+  /*
+   * Find the appropriate entry. Many maps are very small, some are very large.
+   */
+  static const int kSearchThreshold = 8;
+  const uint8_t* data = NULL;
+  int line_addr;
+
+  if (num_entries < kSearchThreshold) {
+    int i;
+    data = map->data_;
+    for (i = num_entries; i > 0; i--) {
+      line_addr = data[0];
+      if (addr_width > 1)
+        line_addr |= data[1] << 8;
+      if (line_addr == addr)
+        return data + addr_width;
+
+      data += line_width;
+    }
+    assert(data == map->data_ + line_width * num_entries);
+  } else {
+    int hi, lo, mid;
+
+    lo = 0;
+    hi = num_entries -1;
+
+    while (hi >= lo) {
+      mid = (hi + lo) / 2;
+      data = map->data_ + line_width * mid;
+
+      line_addr = data[0];
+      if (addr_width > 1)
+        line_addr |= data[1] << 8;
+
+      if (addr > line_addr) {
+        lo = mid + 1;
+      } else if (addr < line_addr) {
+        hi = mid - 1;
+      } else {
+        return data + addr_width;
+      }
+    }
+  }
+
+  return NULL;
+}
+
 void DexVerifier::OutputTypeVector(const RegType* regs, int insn_reg_count,
     uint8_t* data) {
   uint8_t val = 0;
@@ -5469,8 +5573,7 @@
 
   if (map1->header_->format_ != map2->header_->format_ ||
       map1->header_->reg_width_ != map2->header_->reg_width_ ||
-      map1->header_->num_entries_ != map2->header_->num_entries_ ||
-      map1->header_->format_on_heap_ != map2->header_->format_on_heap_) {
+      map1->header_->num_entries_ != map2->header_->num_entries_) {
     LOG(ERROR) << "CompareMaps: fields mismatch";
   }
   if (memcmp(map1->data_, map2->data_, size1) != 0) {
@@ -5719,7 +5822,7 @@
   }
 
   RegisterMap* new_map = new RegisterMap(kRegMapFormatDifferential, reg_width,
-      num_entries, true, new_map_size);
+      num_entries, new_map_size);
 
   tmp_ptr = new_map->data_;
   tmp_ptr = WriteUnsignedLeb128(tmp_ptr, new_data_size);
@@ -5761,7 +5864,7 @@
   /* Now we know enough to allocate the new map. */
   new_data_size = (new_addr_width + reg_width) * num_entries;
   RegisterMap* new_map = new RegisterMap(new_format, reg_width, num_entries,
-      true, new_data_size);
+      new_data_size);
 
   /* Write the start address and initial bits to the new map. */
   uint8_t* dst_ptr = new_map->data_;
diff --git a/src/dex_verifier.h b/src/dex_verifier.h
index 404227d..0170ebe 100644
--- a/src/dex_verifier.h
+++ b/src/dex_verifier.h
@@ -362,12 +362,9 @@
     uint8_t format_;          /* enum RegisterMapFormat; MUST be first entry */
     uint8_t reg_width_;       /* bytes per register line, 1+ */
     uint16_t num_entries_;    /* number of entries */
-    bool    format_on_heap_;  /* indicates allocation on heap */
 
-    RegisterMapHeader(uint8_t format, uint8_t reg_width, uint16_t num_entries,
-        bool format_on_heap)
-        : format_(format), reg_width_(reg_width), num_entries_(num_entries),
-          format_on_heap_(format_on_heap) {
+    RegisterMapHeader(uint8_t format, uint8_t reg_width, uint16_t num_entries)
+        : format_(format), reg_width_(reg_width), num_entries_(num_entries) {
     }
   };
 
@@ -395,9 +392,8 @@
     }
 
     RegisterMap(uint8_t format, uint8_t reg_width, uint16_t num_entries,
-        bool format_on_heap, uint32_t data_size) {
-      header_ = new RegisterMapHeader(format, reg_width, num_entries,
-          format_on_heap);
+        uint32_t data_size) {
+      header_ = new RegisterMapHeader(format, reg_width, num_entries);
       data_ = new uint8_t[data_size]();
       needs_free_ = true;
     }
@@ -561,6 +557,136 @@
     return (uint32_t) (kRegTypeUninit | (uidx << kRegTypeUninitShift));
   }
 
+  /*
+   * Generate the register map for a method that has just been verified
+   * (i.e. we're doing this as part of verification).
+   *
+   * For type-precise determination we have all the data we need, so we
+   * just need to encode it in some clever fashion.
+   *
+   * Returns a pointer to a newly-allocated RegisterMap, or NULL on failure.
+   */
+  static RegisterMap* GenerateRegisterMapV(VerifierData* vdata);
+
+  /*
+   * Get the expanded form of the register map associated with the specified
+   * method. May update the RegisterMap, possibly freeing the previous map.
+   *
+   * Returns NULL on failure (e.g. unable to expand map).
+   *
+   * NOTE: this function is not synchronized; external locking is mandatory.
+   * (This is expected to be called at GC time.)
+   */
+  static inline RegisterMap* GetExpandedRegisterMap(Method* method) {
+    if (method->GetRegisterMapHeader() == NULL ||
+        method->GetRegisterMapData() == NULL) {
+      return NULL;
+    }
+    RegisterMap* cur_map = new RegisterMap(method->GetRegisterMapHeader(),
+        method->GetRegisterMapData());
+    uint8_t format = cur_map->header_->format_;
+    if (format == kRegMapFormatCompact8 || format == kRegMapFormatCompact16) {
+      return cur_map;
+    } else {
+      return GetExpandedRegisterMapHelper(method, cur_map);
+    }
+  }
+
+  /*
+   * Get the expanded form of the register map associated with the method.
+   *
+   * If the map is already in one of the uncompressed formats, we return
+   * immediately.  Otherwise, we expand the map and replace method's register
+   * map pointer, freeing it if it was allocated on the heap.
+   *
+   * NOTE: this function is not synchronized; external locking is mandatory
+   * (unless we're in the zygote, where single-threaded access is guaranteed).
+   */
+  static RegisterMap* GetExpandedRegisterMapHelper(Method* method,
+      RegisterMap* map);
+
+  /* Return the data for the specified address, or NULL if not found. */
+  static const uint8_t* RegisterMapGetLine(const RegisterMap* map, int addr);
+
+  /*
+   * Determine if the RegType value is a reference type.
+   *
+   * Ordinarily we include kRegTypeZero in the "is it a reference"
+   * check. There's no value in doing so here, because we know
+   * the register can't hold anything but zero.
+   */
+  static inline bool IsReferenceType(RegType type) {
+    return (type > kRegTypeMAX || type == kRegTypeUninit);
+  }
+
+  /* Toggle the value of the "idx"th bit in "ptr". */
+  static inline void ToggleBit(uint8_t* ptr, int idx) {
+    ptr[idx >> 3] ^= 1 << (idx & 0x07);
+  }
+
+  /*
+   * Given a line of registers, output a bit vector that indicates whether
+   * or not the register holds a reference type (which could be null).
+   *
+   * We use '1' to indicate it's a reference, '0' for anything else (numeric
+   * value, uninitialized data, merge conflict). Register 0 will be found
+   * in the low bit of the first byte.
+   */
+  static void OutputTypeVector(const RegType* regs, int insn_reg_count,
+      uint8_t* data);
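(Editor's note: the encoding described above is small enough to spell out. A standalone sketch, with illustrative names that are not part of the verifier, packing a per-register "is reference" flag into the documented layout: one bit per register, register 0 in the low bit of the first byte:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // One bit per register, '1' = reference, same bit addressing as ToggleBit.
    std::vector<uint8_t> PackTypeVector(const std::vector<bool>& is_ref) {
      std::vector<uint8_t> out((is_ref.size() + 7) / 8, 0);
      for (size_t i = 0; i < is_ref.size(); ++i) {
        if (is_ref[i]) {
          out[i >> 3] |= 1 << (i & 0x07);
        }
      }
      return out;
    }

    int main() {
      std::vector<bool> regs = {true, false, false, true};  // v0 and v3 hold references
      std::vector<uint8_t> data = PackTypeVector(regs);
      printf("0x%02x\n", data[0]);  // prints 0x09
      return 0;
    }
)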
+
+  /*
+   * Double-check the map.
+   *
+   * We run through all of the data in the map, and compare it to the original.
+   * Only works on uncompressed data.
+   */
+  static bool VerifyMap(VerifierData* vdata, const RegisterMap* map);
+
+  /* Compare two register maps. Returns true if they're equal, false if not. */
+  static bool CompareMaps(const RegisterMap* map1, const RegisterMap* map2);
+
+  /* Compute the size, in bytes, of a register map. */
+  static size_t ComputeRegisterMapSize(const RegisterMap* map);
+
+  /*
+   * Compute the difference between two bit vectors.
+   *
+   * If "leb_out_buf" is non-NULL, we output the bit indices in ULEB128 format
+   * as we go. Otherwise, we just generate the various counts.
+   *
+   * The bit vectors are compared byte-by-byte, so any unused bits at the
+   * end must be zero.
+   *
+   * Returns the number of bytes required to hold the ULEB128 output.
+   *
+   * If "first_bit_changed_ptr" or "num_bits_changed_ptr" are non-NULL, they
+   * will receive the index of the first changed bit and the number of changed
+   * bits, respectively.
+   */
+  static int ComputeBitDiff(const uint8_t* bits1, const uint8_t* bits2,
+      int byte_width, int* first_bit_changed_ptr, int* num_bits_changed_ptr,
+      uint8_t* leb_out_buf);
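(Editor's note: a rough, standalone illustration of the byte-by-byte comparison described above, with the ULEB128 output path omitted; names are illustrative only:

    #include <cstdint>
    #include <cstdio>

    // Count differing bits between two equally sized bit vectors and report
    // the index of the first changed bit (-1 if the vectors are identical).
    int CountBitDiff(const uint8_t* bits1, const uint8_t* bits2, int byte_width,
                     int* first_changed) {
      int changed = 0;
      *first_changed = -1;
      for (int i = 0; i < byte_width; ++i) {
        uint8_t diff = bits1[i] ^ bits2[i];
        for (int bit = 0; bit < 8; ++bit) {
          if (diff & (1 << bit)) {
            if (*first_changed < 0) *first_changed = i * 8 + bit;
            ++changed;
          }
        }
      }
      return changed;
    }

    int main() {
      uint8_t a[] = {0x09};  // v0 and v3 are references
      uint8_t b[] = {0x0b};  // v1 became a reference as well
      int first;
      int n = CountBitDiff(a, b, 1, &first);
      printf("first=%d changed=%d\n", first, n);  // first=1 changed=1
      return 0;
    }
)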
+
+  /*
+   * Compress the register map with differential encoding.
+   *
+   * On success, returns a newly-allocated RegisterMap. If the map is not
+   * compatible for some reason, or fails to get smaller, this will return NULL.
+   */
+  static RegisterMap* CompressMapDifferential(const RegisterMap* map);
+
+  /*
+   * Expand a compressed map to an uncompressed form.
+   *
+   * Returns a newly-allocated RegisterMap on success, or NULL on failure.
+   *
+   * TODO: consider using the linear allocator or a custom allocator with
+   * LRU replacement for these instead of the native heap.
+   */
+  static RegisterMap* UncompressMapDifferential(const RegisterMap* map);
+
+
   /* Verify a class. Returns "true" on success. */
   static bool VerifyClass(Class* klass);
 
@@ -1543,95 +1669,6 @@
       const Instruction::DecodedInstruction* dec_insn, MethodType method_type,
       bool is_range, bool is_super, VerifyError* failure);
 
-  /*
-   * Generate the register map for a method that has just been verified
-   * (i.e. we're doing this as part of verification).
-   *
-   * For type-precise determination we have all the data we need, so we
-   * just need to encode it in some clever fashion.
-   *
-   * Returns a pointer to a newly-allocated RegisterMap, or NULL on failure.
-   */
-  static RegisterMap* GenerateRegisterMapV(VerifierData* vdata);
-
-  /*
-   * Determine if the RegType value is a reference type.
-   *
-   * Ordinarily we include kRegTypeZero in the "is it a reference"
-   * check. There's no value in doing so here, because we know
-   * the register can't hold anything but zero.
-   */
-  static inline bool IsReferenceType(RegType type) {
-    return (type > kRegTypeMAX || type == kRegTypeUninit);
-  }
-
-  /* Toggle the value of the "idx"th bit in "ptr". */
-  static inline void ToggleBit(uint8_t* ptr, int idx) {
-    ptr[idx >> 3] ^= 1 << (idx & 0x07);
-  }
-
-  /*
-   * Given a line of registers, output a bit vector that indicates whether
-   * or not the register holds a reference type (which could be null).
-   *
-   * We use '1' to indicate it's a reference, '0' for anything else (numeric
-   * value, uninitialized data, merge conflict). Register 0 will be found
-   * in the low bit of the first byte.
-   */
-  static void OutputTypeVector(const RegType* regs, int insn_reg_count,
-      uint8_t* data);
-
-  /*
-   * Double-check the map.
-   *
-   * We run through all of the data in the map, and compare it to the original.
-   * Only works on uncompressed data.
-   */
-  static bool VerifyMap(VerifierData* vdata, const RegisterMap* map);
-
-  /* Compare two register maps. Returns true if they're equal, false if not. */
-  static bool CompareMaps(const RegisterMap* map1, const RegisterMap* map2);
-
-  /* Compute the size, in bytes, of a register map. */
-  static size_t ComputeRegisterMapSize(const RegisterMap* map);
-
-  /*
-   * Compute the difference between two bit vectors.
-   *
-   * If "leb_out_buf" is non-NULL, we output the bit indices in ULEB128 format
-   * as we go. Otherwise, we just generate the various counts.
-   *
-   * The bit vectors are compared byte-by-byte, so any unused bits at the
-   * end must be zero.
-   *
-   * Returns the number of bytes required to hold the ULEB128 output.
-   *
-   * If "first_bit_changed_ptr" or "num_bits_changed_ptr" are non-NULL, they
-   * will receive the index of the first changed bit and the number of changed
-   * bits, respectively.
-   */
-  static int ComputeBitDiff(const uint8_t* bits1, const uint8_t* bits2,
-      int byte_width, int* first_bit_changed_ptr, int* num_bits_changed_ptr,
-      uint8_t* leb_out_buf);
-
-  /*
-   * Compress the register map with differential encoding.
-   *
-   * On success, returns a newly-allocated RegisterMap. If the map is not
-   * compatible for some reason, or fails to get smaller, this will return NULL.
-   */
-  static RegisterMap* CompressMapDifferential(const RegisterMap* map);
-
-  /*
-   * Expand a compressed map to an uncompressed form.
-   *
-   * Returns a newly-allocated RegisterMap on success, or NULL on failure.
-   *
-   * TODO: consider using the linear allocator or a custom allocator with
-   * LRU replacement for these instead of the native heap.
-   */
-  static RegisterMap* UncompressMapDifferential(const RegisterMap* map);
-
   DISALLOW_COPY_AND_ASSIGN(DexVerifier);
 };
 
diff --git a/src/image_test.cc b/src/image_test.cc
index 8f3f0b7..7ecdf3c 100644
--- a/src/image_test.cc
+++ b/src/image_test.cc
@@ -86,7 +86,7 @@
     EXPECT_TRUE(klass != NULL) << descriptor;
     EXPECT_LT(boot_base, reinterpret_cast<byte*>(klass)) << descriptor;
     EXPECT_LT(reinterpret_cast<byte*>(klass), boot_limit) << descriptor;
-    EXPECT_TRUE(klass->GetMonitor() == NULL);  // address should have been removed from monitor
+    EXPECT_EQ(*klass->GetRawLockWordAddress(), 0);  // address should have been removed from monitor
   }
 }
 
diff --git a/src/image_writer.h b/src/image_writer.h
index e91be01..c1480ff 100644
--- a/src/image_writer.h
+++ b/src/image_writer.h
@@ -31,7 +31,7 @@
   // we use the lock word to store the offset of the object in the image
   void AssignImageOffset(Object* object) {
     DCHECK(object != NULL);
-    DCHECK(object->GetMonitor() == NULL);  // should be no lock
+    DCHECK(object->monitor_ == 0);  // should be no lock
     SetImageOffset(object, image_top_);
     image_top_ += RoundUp(object->SizeOf(), 8);  // 64-bit alignment
     DCHECK_LT(image_top_, image_->GetLength());
@@ -39,25 +39,25 @@
   static void SetImageOffset(Object* object, size_t offset) {
     DCHECK(object != NULL);
     // should be no lock (but it might be forward referenced interned string)
-    DCHECK(object->GetMonitor() == NULL || object->IsString());
+    DCHECK(object->monitor_ == 0 || object->IsString());
     DCHECK_NE(0U, offset);
-    object->SetMonitor(reinterpret_cast<Monitor*>(offset));
+    object->monitor_ = offset;
   }
   static size_t IsImageOffsetAssigned(const Object* object) {
     DCHECK(object != NULL);
-    size_t offset = reinterpret_cast<size_t>(object->GetMonitor());
+    size_t offset = object->monitor_;
     return offset != 0U;
   }
   static size_t GetImageOffset(const Object* object) {
     DCHECK(object != NULL);
-    size_t offset = reinterpret_cast<size_t>(object->GetMonitor());
+    size_t offset = object->monitor_;
     DCHECK_NE(0U, offset);
     return offset;
   }
   static void ResetImageOffset(Object* object) {
     DCHECK(object != NULL);
-    DCHECK(object->GetMonitor() != NULL);  // should be an offset
-    object->SetMonitor(reinterpret_cast<Monitor*>(0));
+    DCHECK(object->monitor_ != 0);  // should be an offset
+    object->monitor_ = 0;
   }
 
   bool InSourceSpace(const Object* object) {
diff --git a/src/java_lang_System.cc b/src/java_lang_System.cc
index 398eeb9..fc13839 100644
--- a/src/java_lang_System.cc
+++ b/src/java_lang_System.cc
@@ -105,7 +105,7 @@
 namespace {
 
 void ThrowArrayStoreException_NotAnArray(const char* identifier, Object* array) {
-  std::string actualType(PrettyType(array));
+  std::string actualType(PrettyTypeOf(array));
   Thread::Current()->ThrowNewException("Ljava/lang/ArrayStoreException;", "%s is not an array: %s", identifier, actualType.c_str());
 }
 
@@ -153,8 +153,8 @@
   if (srcComponentType->IsPrimitive() || dstComponentType->IsPrimitive()) {
     // If one of the arrays holds a primitive type the other array must hold the exact same type.
     if (srcComponentType->IsPrimitive() != dstComponentType->IsPrimitive() || srcComponentType != dstComponentType) {
-      std::string srcType(PrettyType(srcArray));
-      std::string dstType(PrettyType(dstArray));
+      std::string srcType(PrettyTypeOf(srcArray));
+      std::string dstType(PrettyTypeOf(dstArray));
       self->ThrowNewException("Ljava/lang/ArrayStoreException;",
           "Incompatible types: src=%s, dst=%s", srcType.c_str(), dstType.c_str());
       return;
@@ -175,7 +175,7 @@
       move32(dstBytes + dstPos * 8, srcBytes + srcPos * 8, length * 8);
       break;
     default:
-      LOG(FATAL) << "Unknown primitive array type: " << PrettyType(srcArray);
+      LOG(FATAL) << "Unknown primitive array type: " << PrettyTypeOf(srcArray);
     }
 
     return;
@@ -229,8 +229,8 @@
   move32(dstBytes + dstPos * width, srcBytes + srcPos * width, copyCount * width);
   Heap::WriteBarrier(dstArray);
   if (copyCount != length) {
-    std::string actualSrcType(PrettyType(srcObj[copyCount]));
-    std::string dstType(PrettyType(dstArray));
+    std::string actualSrcType(PrettyTypeOf(srcObj[copyCount]));
+    std::string dstType(PrettyTypeOf(dstArray));
     self->ThrowNewException("Ljava/lang/ArrayStoreException;",
         "source[%d] of type %s cannot be stored in destination array of type %s",
         srcPos + copyCount, actualSrcType.c_str(), dstType.c_str());
diff --git a/src/java_lang_Thread.cc b/src/java_lang_Thread.cc
index 286a211..d2f1349 100644
--- a/src/java_lang_Thread.cc
+++ b/src/java_lang_Thread.cc
@@ -53,31 +53,26 @@
 
 jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject javaThread, jobject javaObject) {
   ThreadListLock lock;
-  //Thread* thread = Thread::FromManagedThread(env, javaThread);
-  //Object* object = dvmDecodeIndirectRef(env, javaObject);
-  //if (object == NULL) {
-    //dvmThrowNullPointerException("object == null");
-    //return JNI_FALSE;
-  //}
-  //Thread* thread = Thread::FromManagedThread(env, javaThread);
-  //int result = dvmHoldsLock(thread, object);
-  //return result;
-  UNIMPLEMENTED(FATAL);
-  return JNI_FALSE;
+  Object* object = Decode<Object*>(env, javaObject);
+  if (object == NULL) {
+    Thread::Current()->ThrowNewException("Ljava/lang/NullPointerException;", "object == null");
+    return JNI_FALSE;
+  }
+  Thread* thread = Thread::FromManagedThread(env, javaThread);
+  return thread->HoldsLock(object);
 }
 
 void Thread_nativeInterrupt(JNIEnv* env, jobject javaThread) {
   ThreadListLock lock;
-  UNIMPLEMENTED(FATAL);
-  //Thread* thread = Thread::FromManagedThread(env, javaThread);
-  //if (thread != NULL) {
-    //dvmThreadInterrupt(thread);
-  //}
+  Thread* thread = Thread::FromManagedThread(env, javaThread);
+  if (thread != NULL) {
+    thread->Interrupt();
+  }
 }
 
 void Thread_nativeSetName(JNIEnv* env, jobject javaThread, jstring javaName) {
   ThreadListLock lock;
-  UNIMPLEMENTED(WARNING);
+  // TODO: needed for debugging (DDMS) support.
   //Thread* thread = Thread::FromManagedThread(env, javaThread);
   //StringObject* nameStr = (StringObject*) dvmDecodeIndirectRef(env, javaName);
   //int threadId = (thread != NULL) ? thread->threadId : -1;
diff --git a/src/jni_internal.cc b/src/jni_internal.cc
index b9b1ab0..94ef729 100644
--- a/src/jni_internal.cc
+++ b/src/jni_internal.cc
@@ -82,9 +82,8 @@
   if (env->check_jni) {
     size_t entry_count = locals.Capacity();
     if (entry_count > 16) {
-      std::string class_descriptor(PrettyDescriptor(obj->GetClass()->GetDescriptor()));
       LOG(WARNING) << "Warning: more than 16 JNI local references: "
-                   << entry_count << " (most recent was a " << class_descriptor << ")";
+                   << entry_count << " (most recent was a " << PrettyTypeOf(obj) << ")";
       locals.Dump();
       // TODO: dvmDumpThread(dvmThreadSelf(), false);
       // dvmAbort();
@@ -384,7 +383,7 @@
 }
 
 void ThrowAIOOBE(ScopedJniThreadState& ts, Array* array, jsize start, jsize length, const char* identifier) {
-  std::string type(PrettyType(array));
+  std::string type(PrettyTypeOf(array));
   ts.Self()->ThrowNewException("Ljava/lang/ArrayIndexOutOfBoundsException;",
       "%s offset=%d length=%d %s.length=%d",
       type.c_str(), start, length, identifier, array->GetLength());
@@ -464,9 +463,9 @@
         handle_(handle),
         class_loader_(class_loader),
         jni_on_load_lock_("JNI_OnLoad lock"),
+        jni_on_load_cond_("JNI_OnLoad"),
         jni_on_load_thread_id_(Thread::Current()->GetThinLockId()),
         jni_on_load_result_(kPending) {
-    CHECK_PTHREAD_CALL(pthread_cond_init, (&jni_on_load_cond_, NULL), "jni_on_load_cond_");
   }
 
   Object* GetClassLoader() {
@@ -498,7 +497,7 @@
                   << "JNI_OnLoad...]";
       }
       ScopedThreadStateChange tsc(self, Thread::kVmWait);
-      CHECK_PTHREAD_CALL(pthread_cond_wait, (&jni_on_load_cond_, jni_on_load_lock_.GetImpl()), "JNI_OnLoad");
+      jni_on_load_cond_.Wait(jni_on_load_lock_);
     }
 
     bool okay = (jni_on_load_result_ == kOkay);
@@ -515,7 +514,7 @@
 
     // Broadcast a wakeup to anybody sleeping on the condition variable.
     MutexLock mu(jni_on_load_lock_);
-    CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&jni_on_load_cond_), "JNI_OnLoad");
+    jni_on_load_cond_.Broadcast();
   }
 
   void* FindSymbol(const std::string& symbol_name) {
@@ -541,7 +540,7 @@
   // Guards remaining items.
   Mutex jni_on_load_lock_;
   // Wait for JNI_OnLoad in other thread.
-  pthread_cond_t jni_on_load_cond_;
+  ConditionVariable jni_on_load_cond_;
   // Recursive invocation guard.
   uint32_t jni_on_load_thread_id_;
   // Result of earlier JNI_OnLoad call.
@@ -722,7 +721,7 @@
       return JNI_ERR;
     }
 
-    LOG(INFO) << "Throwing " << PrettyType(Decode<Throwable*>(ts, exception.get()))
+    LOG(INFO) << "Throwing " << PrettyTypeOf(Decode<Throwable*>(ts, exception.get()))
               << ": " << msg;
     ts.Self()->SetException(Decode<Throwable*>(ts, exception.get()));
 
@@ -751,11 +750,11 @@
     jmethodID mid = env->GetMethodID(exception_class.get(), "printStackTrace", "()V");
     if (mid == NULL) {
       LOG(WARNING) << "JNI WARNING: no printStackTrace()V in "
-                   << PrettyType(original_exception);
+                   << PrettyTypeOf(original_exception);
     } else {
       env->CallVoidMethod(exception.get(), mid);
       if (self->IsExceptionPending()) {
-        LOG(WARNING) << "JNI WARNING: " << PrettyType(self->GetException())
+        LOG(WARNING) << "JNI WARNING: " << PrettyTypeOf(self->GetException())
                      << " thrown while calling printStackTrace";
         self->ClearException();
       }
@@ -2164,8 +2163,7 @@
     Class* c = Decode<Class*>(ts, java_class);
 
     if (ts.Vm()->verbose_jni) {
-      LOG(INFO) << "[Unregistering JNI native methods for "
-                << PrettyDescriptor(c->GetDescriptor()) << "]";
+      LOG(INFO) << "[Unregistering JNI native methods for " << PrettyClass(c) << "]";
     }
 
     for (size_t i = 0; i < c->NumDirectMethods(); ++i) {
@@ -2186,13 +2184,13 @@
 
   static jint MonitorEnter(JNIEnv* env, jobject java_object) {
     ScopedJniThreadState ts(env);
-    Decode<Object*>(ts, java_object)->MonitorEnter();
+    Decode<Object*>(ts, java_object)->MonitorEnter(ts.Self());
     return ts.Self()->IsExceptionPending() ? JNI_ERR : JNI_OK;
   }
 
   static jint MonitorExit(JNIEnv* env, jobject java_object) {
     ScopedJniThreadState ts(env);
-    Decode<Object*>(ts, java_object)->MonitorExit();
+    Decode<Object*>(ts, java_object)->MonitorExit(ts.Self());
     return ts.Self()->IsExceptionPending() ? JNI_ERR : JNI_OK;
   }
 
diff --git a/src/monitor.cc b/src/monitor.cc
new file mode 100644
index 0000000..1a5e6c9
--- /dev/null
+++ b/src/monitor.cc
@@ -0,0 +1,887 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "monitor.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "mutex.h"
+#include "object.h"
+#include "thread.h"
+
+namespace art {
+
+/*
+ * Every Object has a monitor associated with it, but not every Object is
+ * actually locked.  Even the ones that are locked do not need a
+ * full-fledged monitor until a) there is actual contention or b) wait()
+ * is called on the Object.
+ *
+ * For Android, we have implemented a scheme similar to the one described
+ * in Bacon et al.'s "Thin locks: featherweight synchronization for Java"
+ * (ACM 1998).  Things are even easier for us, though, because we have
+ * a full 32 bits to work with.
+ *
+ * The two states of an Object's lock are referred to as "thin" and
+ * "fat".  A lock may transition from the "thin" state to the "fat"
+ * state and this transition is referred to as inflation.  Once a lock
+ * has been inflated it remains in the "fat" state indefinitely.
+ *
+ * The lock value itself is stored in Object.lock.  The LSB of the
+ * lock encodes its state.  When cleared, the lock is in the "thin"
+ * state and its bits are formatted as follows:
+ *
+ *    [31 ---- 19] [18 ---- 3] [2 ---- 1] [0]
+ *     lock count   thread id  hash state  0
+ *
+ * When set, the lock is in the "fat" state and its bits are formatted
+ * as follows:
+ *
+ *    [31 ---- 3] [2 ---- 1] [0]
+ *      pointer   hash state  1
+ *
+ * For an in-depth description of the mechanics of thin-vs-fat locking,
+ * read the paper referred to above.
+ *
+ * Monitors provide:
+ *  - mutually exclusive access to resources
+ *  - a way for multiple threads to wait for notification
+ *
+ * In effect, they fill the role of both mutexes and condition variables.
+ *
+ * Only one thread can own the monitor at any time.  There may be several
+ * threads waiting on it (the wait call unlocks it).  One or more waiting
+ * threads may be getting interrupted or notified at any given time.
+ *
+ * TODO: the various members of monitor are not SMP-safe.
+ */
+
+
+/*
+ * Monitor accessor.  Extracts a monitor structure pointer from a fat
+ * lock.  Performs no error checking.
+ */
+#define LW_MONITOR(x) \
+  ((Monitor*)((x) & ~((LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT) | LW_SHAPE_MASK)))
+
+/*
+ * Lock recursion count field.  Contains a count of the number of times
+ * a lock has been recursively acquired.
+ */
+#define LW_LOCK_COUNT_MASK 0x1fff
+#define LW_LOCK_COUNT_SHIFT 19
+#define LW_LOCK_COUNT(x) (((x) >> LW_LOCK_COUNT_SHIFT) & LW_LOCK_COUNT_MASK)
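(Editor's note: to make the bit layout above concrete, here is a standalone sketch that decodes a thin lock word using shift/mask constants restated from the comment; the names are illustrative, the real values are the LW_* macros in monitor.h:

    #include <cstdint>
    #include <cstdio>

    static const uint32_t kShapeMask      = 0x1;     // bit 0: 0 = thin, 1 = fat
    static const uint32_t kHashStateShift = 1;       // bits 2..1
    static const uint32_t kHashStateMask  = 0x3;
    static const uint32_t kOwnerShift     = 3;       // bits 18..3: thread id
    static const uint32_t kOwnerMask      = 0xffff;
    static const uint32_t kCountShift     = 19;      // bits 31..19: recursion count
    static const uint32_t kCountMask      = 0x1fff;

    int main() {
      // Thin lock held by thread id 5 with one recursive re-acquisition.
      uint32_t lock_word = (1u << kCountShift) | (5u << kOwnerShift);
      if ((lock_word & kShapeMask) == 0) {  // thin shape
        printf("owner=%u count=%u hash_state=%u\n",
               (lock_word >> kOwnerShift) & kOwnerMask,
               (lock_word >> kCountShift) & kCountMask,
               (lock_word >> kHashStateShift) & kHashStateMask);
        // -> owner=5 count=1 hash_state=0
      }
      return 0;
    }
)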
+
+Monitor::Monitor(Object* obj)
+    : owner_(NULL),
+      lock_count_(0),
+      obj_(obj),
+      wait_set_(NULL),
+      lock_("a monitor lock"),
+      next_(NULL),
+      owner_filename_(NULL),
+      owner_line_number_(0) {
+}
+
+Monitor::~Monitor() {
+  DCHECK(obj_ != NULL);
+  DCHECK_EQ(LW_SHAPE(*obj_->GetRawLockWordAddress()), LW_SHAPE_FAT);
+
+#ifndef NDEBUG
+  /* This lock is associated with an object
+   * that's being swept.  The only possible way
+   * anyone could be holding this lock would be
+   * if some JNI code locked but didn't unlock
+   * the object, in which case we've got some bad
+   * native code somewhere.
+   */
+  DCHECK(lock_.TryLock());
+  lock_.Unlock();
+#endif
+}
+
+/*
+ * Links a thread into a monitor's wait set.  The monitor lock must be
+ * held by the caller of this routine.
+ */
+void Monitor::AppendToWaitSet(Thread* thread) {
+  DCHECK(owner_ == Thread::Current());
+  DCHECK(thread != NULL);
+  DCHECK(thread->wait_next_ == NULL);
+  if (wait_set_ == NULL) {
+    wait_set_ = thread;
+    return;
+  }
+
+  // push_back.
+  Thread* t = wait_set_;
+  while (t->wait_next_ != NULL) {
+    t = t->wait_next_;
+  }
+  t->wait_next_ = thread;
+}
+
+/*
+ * Unlinks a thread from a monitor's wait set.  The monitor lock must
+ * be held by the caller of this routine.
+ */
+void Monitor::RemoveFromWaitSet(Thread *thread) {
+  DCHECK(owner_ == Thread::Current());
+  DCHECK(thread != NULL);
+  if (wait_set_ == NULL) {
+    return;
+  }
+  if (wait_set_ == thread) {
+    wait_set_ = thread->wait_next_;
+    thread->wait_next_ = NULL;
+    return;
+  }
+
+  Thread* t = wait_set_;
+  while (t->wait_next_ != NULL) {
+    if (t->wait_next_ == thread) {
+      t->wait_next_ = thread->wait_next_;
+      thread->wait_next_ = NULL;
+      return;
+    }
+    t = t->wait_next_;
+  }
+}
+
+// Global list of all monitors. Used for cleanup.
+static Monitor* gMonitorList = NULL;
+
+void Monitor::FreeMonitorList() {
+  Monitor* m = gMonitorList;
+  while (m != NULL) {
+    Monitor* next = m->next_;
+    delete m;
+    m = next;
+  }
+}
+
+/*
+ * Frees monitor objects belonging to unmarked objects.
+ */
+static void SweepMonitorList(Monitor** mon, bool (isUnmarkedObject)(void*)) {
+  UNIMPLEMENTED(FATAL);
+#if 0
+  Monitor handle;
+  Monitor *curr;
+
+  assert(mon != NULL);
+  assert(isUnmarkedObject != NULL);
+  Monitor* prev = &handle;
+  prev->next = curr = *mon;
+  while (curr != NULL) {
+    Object* obj = curr->obj;
+    if ((*isUnmarkedObject)(obj) != 0) {
+      prev->next = curr->next;
+      delete curr;
+      curr = prev->next;
+    } else {
+      prev = curr;
+      curr = curr->next;
+    }
+  }
+  *mon = handle.next;
+#endif
+}
+
+void Monitor::SweepMonitorList(bool (isUnmarkedObject)(void*)) {
+  ::art::SweepMonitorList(&gMonitorList, isUnmarkedObject);
+}
+
+/*
+static char *logWriteInt(char *dst, int value) {
+  *dst++ = EVENT_TYPE_INT;
+  set4LE((uint8_t *)dst, value);
+  return dst + 4;
+}
+
+static char *logWriteString(char *dst, const char *value, size_t len) {
+  *dst++ = EVENT_TYPE_STRING;
+  len = len < 32 ? len : 32;
+  set4LE((uint8_t *)dst, len);
+  dst += 4;
+  memcpy(dst, value, len);
+  return dst + len;
+}
+
+#define EVENT_LOG_TAG_dvm_lock_sample 20003
+
+static void logContentionEvent(Thread *self, uint32_t waitMs, uint32_t samplePercent,
+                               const char *ownerFileName, uint32_t ownerLineNumber)
+{
+    const StackSaveArea *saveArea;
+    const Method *meth;
+    uint32_t relativePc;
+    char eventBuffer[174];
+    const char *fileName;
+    char procName[33];
+    char *cp;
+    size_t len;
+    int fd;
+
+    saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame);
+    meth = saveArea->method;
+    cp = eventBuffer;
+
+    // Emit the event list length, 1 byte.
+    *cp++ = 9;
+
+    // Emit the process name, <= 37 bytes.
+    fd = open("/proc/self/cmdline", O_RDONLY);
+    memset(procName, 0, sizeof(procName));
+    read(fd, procName, sizeof(procName) - 1);
+    close(fd);
+    len = strlen(procName);
+    cp = logWriteString(cp, procName, len);
+
+    // Emit the sensitive thread ("main thread") status, 5 bytes.
+    bool isSensitive = false;
+    if (gDvm.isSensitiveThreadHook != NULL) {
+        isSensitive = gDvm.isSensitiveThreadHook();
+    }
+    cp = logWriteInt(cp, isSensitive);
+
+    // Emit self thread name string, <= 37 bytes.
+    std::string selfName = dvmGetThreadName(self);
+    cp = logWriteString(cp, selfName.c_str(), selfName.size());
+
+    // Emit the wait time, 5 bytes.
+    cp = logWriteInt(cp, waitMs);
+
+    // Emit the source code file name, <= 37 bytes.
+    fileName = dvmGetMethodSourceFile(meth);
+    if (fileName == NULL) fileName = "";
+    cp = logWriteString(cp, fileName, strlen(fileName));
+
+    // Emit the source code line number, 5 bytes.
+    relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
+    cp = logWriteInt(cp, dvmLineNumFromPC(meth, relativePc));
+
+    // Emit the lock owner source code file name, <= 37 bytes.
+    if (ownerFileName == NULL) {
+        ownerFileName = "";
+    } else if (strcmp(fileName, ownerFileName) == 0) {
+        // Common case, so save on log space.
+        ownerFileName = "-";
+    }
+    cp = logWriteString(cp, ownerFileName, strlen(ownerFileName));
+
+    // Emit the source code line number, 5 bytes.
+    cp = logWriteInt(cp, ownerLineNumber);
+
+    // Emit the sample percentage, 5 bytes.
+    cp = logWriteInt(cp, samplePercent);
+
+    assert((size_t)(cp - eventBuffer) <= sizeof(eventBuffer));
+    android_btWriteLog(EVENT_LOG_TAG_dvm_lock_sample,
+                       EVENT_TYPE_LIST,
+                       eventBuffer,
+                       (size_t)(cp - eventBuffer));
+}
+*/
+
+void Monitor::Lock(Thread* self) {
+//  uint32_t waitThreshold, samplePercent;
+//  uint64_t waitStart, waitEnd, waitMs;
+
+  if (owner_ == self) {
+    lock_count_++;
+    return;
+  }
+  if (!lock_.TryLock()) {
+    {
+      ScopedThreadStateChange tsc(self, Thread::kBlocked);
+//      waitThreshold = gDvm.lockProfThreshold;
+//      if (waitThreshold) {
+//        waitStart = dvmGetRelativeTimeUsec();
+//      }
+//      const char* currentOwnerFileName = mon->ownerFileName;
+//      uint32_t currentOwnerLineNumber = mon->ownerLineNumber;
+
+      lock_.Lock();
+//      if (waitThreshold) {
+//        waitEnd = dvmGetRelativeTimeUsec();
+//      }
+    }
+//    if (waitThreshold) {
+//      waitMs = (waitEnd - waitStart) / 1000;
+//      if (waitMs >= waitThreshold) {
+//        samplePercent = 100;
+//      } else {
+//        samplePercent = 100 * waitMs / waitThreshold;
+//      }
+//      if (samplePercent != 0 && ((uint32_t)rand() % 100 < samplePercent)) {
+//        logContentionEvent(self, waitMs, samplePercent, currentOwnerFileName, currentOwnerLineNumber);
+//      }
+//    }
+  }
+  owner_ = self;
+  DCHECK_EQ(lock_count_, 0);
+
+  // When debugging, save the current monitor holder for future
+  // acquisition failures to use in sampled logging.
+//  if (gDvm.lockProfThreshold > 0) {
+//    const StackSaveArea *saveArea;
+//    const Method *meth;
+//    mon->ownerLineNumber = 0;
+//    if (self->interpSave.curFrame == NULL) {
+//      mon->ownerFileName = "no_frame";
+//    } else if ((saveArea = SAVEAREA_FROM_FP(self->interpSave.curFrame)) == NULL) {
+//      mon->ownerFileName = "no_save_area";
+//    } else if ((meth = saveArea->method) == NULL) {
+//      mon->ownerFileName = "no_method";
+//    } else {
+//      uint32_t relativePc = saveArea->xtra.currentPc - saveArea->method->insns;
+//      mon->ownerFileName = (char*) dvmGetMethodSourceFile(meth);
+//      if (mon->ownerFileName == NULL) {
+//        mon->ownerFileName = "no_method_file";
+//      } else {
+//        mon->ownerLineNumber = dvmLineNumFromPC(meth, relativePc);
+//      }
+//    }
+//  }
+}
+
+void ThrowIllegalMonitorStateException(const char* msg) {
+  Thread::Current()->ThrowNewException("Ljava/lang/IllegalMonitorStateException;", "%s", msg);
+}
+
+bool Monitor::Unlock(Thread* self) {
+  DCHECK(self != NULL);
+  if (owner_ == self) {
+    // We own the monitor, so nobody else can be in here.
+    if (lock_count_ == 0) {
+      owner_ = NULL;
+      owner_filename_ = "unlocked";
+      owner_line_number_ = 0;
+      lock_.Unlock();
+    } else {
+      --lock_count_;
+    }
+  } else {
+    // We don't own this, so we're not allowed to unlock it.
+    // The JNI spec says that we should throw IllegalMonitorStateException
+    // in this case.
+    ThrowIllegalMonitorStateException("unlock of unowned monitor");
+    return false;
+  }
+  return true;
+}
+
+/*
+ * Converts the given relative waiting time into an absolute time.
+ */
+void ToAbsoluteTime(int64_t ms, int32_t ns, struct timespec *ts) {
+  int64_t endSec;
+
+#ifdef HAVE_TIMEDWAIT_MONOTONIC
+  clock_gettime(CLOCK_MONOTONIC, ts);
+#else
+  {
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    ts->tv_sec = tv.tv_sec;
+    ts->tv_nsec = tv.tv_usec * 1000;
+  }
+#endif
+  endSec = ts->tv_sec + ms / 1000;
+  if (endSec >= 0x7fffffff) {
+    LOG(INFO) << "Note: end time exceeds epoch";
+    endSec = 0x7ffffffe;
+  }
+  ts->tv_sec = endSec;
+  ts->tv_nsec = (ts->tv_nsec + (ms % 1000) * 1000000) + ns;
+
+  // Catch rollover.
+  if (ts->tv_nsec >= 1000000000L) {
+    ts->tv_sec++;
+    ts->tv_nsec -= 1000000000L;
+  }
+}
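(Editor's note: a worked example of the conversion above, as a standalone sketch that starts from a fixed "now" instead of clock_gettime() so the rollover arithmetic is easy to check:

    #include <cstdint>
    #include <cstdio>
    #include <time.h>

    int main() {
      struct timespec ts = {1000, 600000000};      // pretend "now" is 1000.6s
      int64_t ms = 1500;                           // wait 1500ms...
      int32_t ns = 250000;                         // ...plus 250000ns
      ts.tv_sec += ms / 1000;                      // +1s
      ts.tv_nsec += (ms % 1000) * 1000000 + ns;    // +500250000ns
      if (ts.tv_nsec >= 1000000000L) {             // catch rollover
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
      }
      printf("%ld.%09ld\n", (long)ts.tv_sec, (long)ts.tv_nsec);  // 1002.100250000
      return 0;
    }
)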
+
+int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t ms, int32_t ns) {
+  struct timespec ts;
+  ToAbsoluteTime(ms, ns, &ts);
+#if defined(HAVE_TIMEDWAIT_MONOTONIC)
+  int rc = pthread_cond_timedwait_monotonic(cond, mutex, &ts);
+#else
+  int rc = pthread_cond_timedwait(cond, mutex, &ts);
+#endif
+  DCHECK(rc == 0 || rc == ETIMEDOUT);
+  return rc;
+}
+
+/*
+ * Wait on a monitor until timeout, interrupt, or notification.  Used for
+ * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
+ *
+ * If another thread calls Thread.interrupt(), we throw InterruptedException
+ * and return immediately if one of the following are true:
+ *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
+ *  - blocked in join(), join(long), or join(long, int) methods of Thread
+ *  - blocked in sleep(long), or sleep(long, int) methods of Thread
+ * Otherwise, we set the "interrupted" flag.
+ *
+ * Checks to make sure that "ns" is in the range 0-999999
+ * (i.e. fractions of a millisecond) and throws the appropriate
+ * exception if it isn't.
+ *
+ * The spec allows "spurious wakeups", and recommends that all code using
+ * Object.wait() do so in a loop.  This appears to derive from concerns
+ * about pthread_cond_wait() on multiprocessor systems.  Some commentary
+ * on the web casts doubt on whether these can/should occur.
+ *
+ * Since we're allowed to wake up "early", we clamp extremely long durations
+ * to return at the end of the 32-bit time epoch.
+ */
+void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, bool interruptShouldThrow) {
+  DCHECK(self != NULL);
+
+  // Make sure that we hold the lock.
+  if (owner_ != self) {
+    ThrowIllegalMonitorStateException("object not locked by thread before wait()");
+    return;
+  }
+
+  // Enforce the timeout range.
+  if (ms < 0 || ns < 0 || ns > 999999) {
+    Thread::Current()->ThrowNewException("Ljava/lang/IllegalArgumentException;",
+        "timeout arguments out of range: ms=%lld ns=%d", ms, ns);
+    return;
+  }
+
+  // Compute absolute wakeup time, if necessary.
+  struct timespec ts;
+  bool timed = false;
+  if (ms != 0 || ns != 0) {
+    ToAbsoluteTime(ms, ns, &ts);
+    timed = true;
+  }
+
+  /*
+   * Add ourselves to the set of threads waiting on this monitor, and
+   * release our hold.  We need to let it go even if we're a few levels
+   * deep in a recursive lock, and we need to restore that later.
+   *
+   * We append to the wait set ahead of clearing the count and owner
+   * fields so the subroutine can check that the calling thread owns
+   * the monitor.  Aside from that, the order of member updates is
+   * not order sensitive as we hold the pthread mutex.
+   */
+  AppendToWaitSet(self);
+  int prevLockCount = lock_count_;
+  lock_count_ = 0;
+  owner_ = NULL;
+  const char* savedFileName = owner_filename_;
+  owner_filename_ = NULL;
+  uint32_t savedLineNumber = owner_line_number_;
+  owner_line_number_ = 0;
+
+  /*
+   * Update thread status.  If the GC wakes up, it'll ignore us, knowing
+   * that we won't touch any references in this state, and we'll check
+   * our suspend mode before we transition out.
+   */
+  if (timed) {
+    self->SetState(Thread::kTimedWaiting);
+  } else {
+    self->SetState(Thread::kWaiting);
+  }
+
+  self->wait_mutex_.Lock();
+
+  /*
+   * Set wait_monitor_ to the monitor object we will be waiting on.
+   * When wait_monitor_ is non-NULL a notifying or interrupting thread
+   * must signal the thread's wait_cond_ to wake it up.
+   */
+  DCHECK(self->wait_monitor_ == NULL);
+  self->wait_monitor_ = this;
+
+  /*
+   * Handle the case where the thread was interrupted before we called
+   * wait().
+   */
+  bool wasInterrupted = false;
+  if (self->interrupted_) {
+    wasInterrupted = true;
+    self->wait_monitor_ = NULL;
+    self->wait_mutex_.Unlock();
+    goto done;
+  }
+
+  /*
+   * Release the monitor lock and wait for a notification or
+   * a timeout to occur.
+   */
+  lock_.Unlock();
+
+  if (!timed) {
+    self->wait_cond_.Wait(self->wait_mutex_);
+  } else {
+    self->wait_cond_.TimedWait(self->wait_mutex_, ts);
+  }
+  if (self->interrupted_) {
+    wasInterrupted = true;
+  }
+
+  self->interrupted_ = false;
+  self->wait_monitor_ = NULL;
+  self->wait_mutex_.Unlock();
+
+  // Reacquire the monitor lock.
+  Lock(self);
+
+done:
+  /*
+   * We remove our thread from wait set after restoring the count
+   * and owner fields so the subroutine can check that the calling
+   * thread owns the monitor. Aside from that, the order of member
+   * updates is not order sensitive as we hold the pthread mutex.
+   */
+  owner_ = self;
+  lock_count_ = prevLockCount;
+  owner_filename_ = savedFileName;
+  owner_line_number_ = savedLineNumber;
+  RemoveFromWaitSet(self);
+
+  /* set self->status back to Thread::kRunnable, and self-suspend if needed */
+  self->SetState(Thread::kRunnable);
+
+  if (wasInterrupted) {
+    /*
+     * We were interrupted while waiting, or somebody interrupted an
+     * un-interruptible thread earlier and we're bailing out immediately.
+     *
+     * The doc sayeth: "The interrupted status of the current thread is
+     * cleared when this exception is thrown."
+     */
+    self->interrupted_ = false;
+    if (interruptShouldThrow) {
+      Thread::Current()->ThrowNewException("Ljava/lang/InterruptedException;", "%s", "");
+    }
+  }
+}
+
+void Monitor::Notify(Thread* self) {
+  DCHECK(self != NULL);
+
+  // Make sure that we hold the lock.
+  if (owner_ != self) {
+    ThrowIllegalMonitorStateException("object not locked by thread before notify()");
+    return;
+  }
+  // Signal the first waiting thread in the wait set.
+  while (wait_set_ != NULL) {
+    Thread* thread = wait_set_;
+    wait_set_ = thread->wait_next_;
+    thread->wait_next_ = NULL;
+
+    // Check to see if the thread is still waiting.
+    MutexLock mu(thread->wait_mutex_);
+    if (thread->wait_monitor_ != NULL) {
+      thread->wait_cond_.Signal();
+      return;
+    }
+  }
+}
+
+void Monitor::NotifyAll(Thread* self) {
+  DCHECK(self != NULL);
+
+  // Make sure that we hold the lock.
+  if (owner_ != self) {
+    ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
+    return;
+  }
+  // Signal all threads in the wait set.
+  while (wait_set_ != NULL) {
+    Thread* thread = wait_set_;
+    wait_set_ = thread->wait_next_;
+    thread->wait_next_ = NULL;
+    thread->Notify();
+  }
+}
+
+/*
+ * Changes the shape of a monitor from thin to fat, preserving the
+ * internal lock state. The calling thread must own the lock.
+ */
+void Monitor::Inflate(Thread* self, Object* obj) {
+  DCHECK(self != NULL);
+  DCHECK(obj != NULL);
+  DCHECK_EQ(LW_SHAPE(*obj->GetRawLockWordAddress()), LW_SHAPE_THIN);
+  DCHECK_EQ(LW_LOCK_OWNER(*obj->GetRawLockWordAddress()), static_cast<int32_t>(self->thin_lock_id_));
+
+  // Allocate and acquire a new monitor.
+  Monitor* m = new Monitor(obj);
+  // Replace the head of the list with the new monitor.
+  do {
+    m->next_ = gMonitorList;
+  } while (android_atomic_release_cas((int32_t)m->next_, (int32_t)m, (int32_t*)(void*)&gMonitorList) != 0);
+  m->Lock(self);
+  // Propagate the lock state.
+  uint32_t thin = *obj->GetRawLockWordAddress();
+  m->lock_count_ = LW_LOCK_COUNT(thin);
+  thin &= LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT;
+  thin |= reinterpret_cast<uint32_t>(m) | LW_SHAPE_FAT;
+  // Publish the updated lock word.
+  android_atomic_release_store(thin, obj->GetRawLockWordAddress());
+}
+
+void Monitor::MonitorEnter(Thread* self, Object* obj) {
+  volatile int32_t* thinp = obj->GetRawLockWordAddress();
+  struct timespec tm;
+  long sleepDelayNs;
+  long minSleepDelayNs = 1000000;  /* 1 millisecond */
+  long maxSleepDelayNs = 1000000000;  /* 1 second */
+  uint32_t thin, newThin, threadId;
+
+  DCHECK(self != NULL);
+  DCHECK(obj != NULL);
+  threadId = self->thin_lock_id_;
+retry:
+  thin = *thinp;
+  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+    /*
+     * The lock is a thin lock.  The owner field is used to
+     * determine the acquire method, ordered by cost.
+     */
+    if (LW_LOCK_OWNER(thin) == threadId) {
+      /*
+       * The calling thread owns the lock.  Increment the
+       * value of the recursion count field.
+       */
+      *thinp += 1 << LW_LOCK_COUNT_SHIFT;
+      if (LW_LOCK_COUNT(*thinp) == LW_LOCK_COUNT_MASK) {
+        /*
+         * The reacquisition limit has been reached.  Inflate
+         * the lock so the next acquire will not overflow the
+         * recursion count field.
+         */
+        Inflate(self, obj);
+      }
+    } else if (LW_LOCK_OWNER(thin) == 0) {
+      /*
+       * The lock is unowned.  Install the thread id of the
+       * calling thread into the owner field.  This is the
+       * common case.  In performance critical code the JIT
+       * will have tried this before calling out to the VM.
+       */
+      newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
+      if (android_atomic_acquire_cas(thin, newThin, thinp) != 0) {
+        // The acquire failed. Try again.
+        goto retry;
+      }
+    } else {
+      LOG(INFO) << StringPrintf("(%d) spin on lock %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
+      // The lock is owned by another thread. Notify the VM that we are about to wait.
+      Thread::State oldStatus = self->SetState(Thread::kBlocked);
+      // Spin until the thin lock is released or inflated.
+      sleepDelayNs = 0;
+      for (;;) {
+        thin = *thinp;
+        // Check the shape of the lock word. Another thread
+        // may have inflated the lock while we were waiting.
+        if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+          if (LW_LOCK_OWNER(thin) == 0) {
+            // The lock has been released. Install the thread id of the
+            // calling thread into the owner field.
+            newThin = thin | (threadId << LW_LOCK_OWNER_SHIFT);
+            if (android_atomic_acquire_cas(thin, newThin, thinp) == 0) {
+              // The acquire succeeded. Break out of the loop and proceed to inflate the lock.
+              break;
+            }
+          } else {
+            // The lock has not been released. Yield so the owning thread can run.
+            if (sleepDelayNs == 0) {
+              sched_yield();
+              sleepDelayNs = minSleepDelayNs;
+            } else {
+              tm.tv_sec = 0;
+              tm.tv_nsec = sleepDelayNs;
+              nanosleep(&tm, NULL);
+              // Prepare the next delay value. Wrap back to the minimum so we don't end up polling only about once a second forever.
+              if (sleepDelayNs < maxSleepDelayNs / 2) {
+                sleepDelayNs *= 2;
+              } else {
+                sleepDelayNs = minSleepDelayNs;
+              }
+            }
+          }
+        } else {
+          // The thin lock was inflated by another thread. Let the VM know we are no longer
+          // waiting and try again.
+          LOG(INFO) << "(" << threadId << ") lock " << (void*) thinp << " surprise-fattened";
+          self->SetState(oldStatus);
+          goto retry;
+        }
+      }
+      LOG(INFO) << StringPrintf("(%d) spin on lock done %p: %#x (%#x) %#x", threadId, thinp, 0, *thinp, thin);
+      // We have acquired the thin lock. Let the VM know that we are no longer waiting.
+      self->SetState(oldStatus);
+      // Fatten the lock.
+      Inflate(self, obj);
+      LOG(INFO) << StringPrintf("(%d) lock %p fattened", threadId, thinp);
+    }
+  } else {
+    // The lock is a fat lock.
+    DCHECK(LW_MONITOR(*thinp) != NULL);
+    LW_MONITOR(*thinp)->Lock(self);
+  }
+}
+
+bool Monitor::MonitorExit(Thread* self, Object* obj) {
+  volatile int32_t* thinp = obj->GetRawLockWordAddress();
+
+  DCHECK(self != NULL);
+  DCHECK_EQ(self->GetState(), Thread::kRunnable);
+  DCHECK(obj != NULL);
+
+  /*
+   * Cache the lock word as its value can change while we are
+   * examining its state.
+   */
+  uint32_t thin = *thinp;
+  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+    /*
+     * The lock is thin.  We must ensure that the lock is owned
+     * by the given thread before unlocking it.
+     */
+    if (LW_LOCK_OWNER(thin) == self->thin_lock_id_) {
+      /*
+       * We are the lock owner.  It is safe to update the lock
+       * without CAS as lock ownership guards the lock itself.
+       */
+      if (LW_LOCK_COUNT(thin) == 0) {
+        /*
+         * The lock was not recursively acquired, the common
+         * case.  Unlock by clearing all bits except for the
+         * hash state.
+         */
+        thin &= (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT);
+        android_atomic_release_store(thin, thinp);
+      } else {
+        /*
+         * The object was recursively acquired.  Decrement the
+         * lock recursion count field.
+         */
+        *thinp -= 1 << LW_LOCK_COUNT_SHIFT;
+      }
+    } else {
+      /*
+       * We do not own the lock.  The JVM spec requires that we
+       * throw an exception in this case.
+       */
+      ThrowIllegalMonitorStateException("unlock of unowned monitor");
+      return false;
+    }
+  } else {
+    /*
+     * The lock is fat.  We must check to see if Unlock has
+     * raised any exceptions before continuing.
+     */
+    DCHECK(LW_MONITOR(*thinp) != NULL);
+    if (!LW_MONITOR(*thinp)->Unlock(self)) {
+      // An exception has been raised.  Do not fall through.
+      return false;
+    }
+  }
+  return true;
+}
+
+/*
+ * Object.wait().  Also called for class init.
+ */
+void Monitor::Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow) {
+  volatile int32_t* thinp = obj->GetRawLockWordAddress();
+
+  // If the lock is still thin, we need to fatten it.
+  uint32_t thin = *thinp;
+  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+    // Make sure that 'self' holds the lock.
+    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
+      ThrowIllegalMonitorStateException("object not locked by thread before wait()");
+      return;
+    }
+
+    /* This thread holds the lock.  We need to fatten the lock
+     * so 'self' can block on it.  Don't update the object lock
+     * field yet, because 'self' needs to acquire the lock before
+     * any other thread gets a chance.
+     */
+    Inflate(self, obj);
+    LOG(INFO) << StringPrintf("(%d) lock %p fattened by wait()", self->thin_lock_id_, thinp);
+  }
+  LW_MONITOR(*thinp)->Wait(self, ms, ns, interruptShouldThrow);
+}
+
+void Monitor::Notify(Thread* self, Object* obj) {
+  uint32_t thin = *obj->GetRawLockWordAddress();
+
+  // If the lock is still thin, there aren't any waiters;
+  // waiting on an object forces lock fattening.
+  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+    // Make sure that 'self' holds the lock.
+    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
+      ThrowIllegalMonitorStateException("object not locked by thread before notify()");
+      return;
+    }
+    // No-op; there are no waiters to notify.
+  } else {
+    // It's a fat lock.
+    LW_MONITOR(thin)->Notify(self);
+  }
+}
+
+void Monitor::NotifyAll(Thread* self, Object* obj) {
+  uint32_t thin = *obj->GetRawLockWordAddress();
+
+  // If the lock is still thin, there aren't any waiters;
+  // waiting on an object forces lock fattening.
+  if (LW_SHAPE(thin) == LW_SHAPE_THIN) {
+    // Make sure that 'self' holds the lock.
+    if (LW_LOCK_OWNER(thin) != self->thin_lock_id_) {
+      ThrowIllegalMonitorStateException("object not locked by thread before notifyAll()");
+      return;
+    }
+    // No-op; there are no waiters to notify.
+  } else {
+    // It's a fat lock.
+    LW_MONITOR(thin)->NotifyAll(self);
+  }
+}
+
+uint32_t Monitor::GetLockOwner(uint32_t raw_lock_word) {
+  if (LW_SHAPE(raw_lock_word) == LW_SHAPE_THIN) {
+    return LW_LOCK_OWNER(raw_lock_word);
+  } else {
+    Thread* owner = LW_MONITOR(raw_lock_word)->owner_;
+    return owner ? owner->GetThinLockId() : 0;
+  }
+}
+
+}  // namespace art
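
The contended thin-lock path in MonitorEnter above backs off exponentially: it yields once, then sleeps 1 ms, doubling the delay each iteration and wrapping back to 1 ms once the delay would exceed half a second, so a long-held lock is still polled roughly once or twice per second. A minimal standalone sketch of that delay schedule, using the same constants as the code above but printing each step instead of yielding or sleeping:

#include <cstdio>

// Illustrative only: reproduces the backoff schedule of the contended
// thin-lock loop in Monitor::MonitorEnter, printing each step instead of
// calling sched_yield()/nanosleep().
int main() {
  const long min_sleep_delay_ns = 1000000;     // 1 millisecond
  const long max_sleep_delay_ns = 1000000000;  // 1 second
  long sleep_delay_ns = 0;
  for (int attempt = 0; attempt < 14; ++attempt) {
    if (sleep_delay_ns == 0) {
      std::printf("attempt %2d: sched_yield()\n", attempt);
      sleep_delay_ns = min_sleep_delay_ns;
    } else {
      std::printf("attempt %2d: sleep %ld ns\n", attempt, sleep_delay_ns);
      if (sleep_delay_ns < max_sleep_delay_ns / 2) {
        sleep_delay_ns *= 2;                    // double the delay...
      } else {
        sleep_delay_ns = min_sleep_delay_ns;    // ...but wrap before reaching ~1 s
      }
    }
  }
  return 0;
}
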
diff --git a/src/monitor.h b/src/monitor.h
index b82c463..8c22fb1 100644
--- a/src/monitor.h
+++ b/src/monitor.h
@@ -1,77 +1,122 @@
-// Copyright 2011 Google Inc. All Rights Reserved.
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 #ifndef ART_SRC_MONITOR_H_
 #define ART_SRC_MONITOR_H_
 
-#include "logging.h"
-#include "macros.h"
+#include <pthread.h>
+#include <stdint.h>
+
+#include "mutex.h"
 
 namespace art {
 
+/*
+ * Monitor shape field. Used to distinguish thin locks from fat locks.
+ */
+#define LW_SHAPE_THIN 0
+#define LW_SHAPE_FAT 1
+#define LW_SHAPE_MASK 0x1
+#define LW_SHAPE(x) ((x) & LW_SHAPE_MASK)
+
+/*
+ * Hash state field.  Used to signify that an object has had its
+ * identity hash code exposed or relocated.
+ */
+#define LW_HASH_STATE_UNHASHED 0
+#define LW_HASH_STATE_HASHED 1
+#define LW_HASH_STATE_HASHED_AND_MOVED 3
+#define LW_HASH_STATE_MASK 0x3
+#define LW_HASH_STATE_SHIFT 1
+#define LW_HASH_STATE(x) (((x) >> LW_HASH_STATE_SHIFT) & LW_HASH_STATE_MASK)
+
+/*
+ * Lock owner field.  Contains the thread id of the thread currently
+ * holding the lock.
+ */
+#define LW_LOCK_OWNER_MASK 0xffff
+#define LW_LOCK_OWNER_SHIFT 3
+#define LW_LOCK_OWNER(x) (((x) >> LW_LOCK_OWNER_SHIFT) & LW_LOCK_OWNER_MASK)
+
+class Object;
+class Thread;
+
 class Monitor {
  public:
+  ~Monitor();
 
-  // Lock constants used by compiler
-  static const uint32_t kLwLockOwnerShift = 3;
-  static const uint32_t kLwHashStateShift = 1;
-  static const uint32_t kLwHashStateMask = 0x3;
-  static const uint32_t kLwShapeThin = 0;
+  static uint32_t GetLockOwner(uint32_t raw_lock_word);
 
-  void Enter() {
-  }
+  static void MonitorEnter(Thread* thread, Object* obj);
+  static bool MonitorExit(Thread* thread, Object* obj);
 
-  void Exit() {
-  }
+  static void Notify(Thread* self, Object* obj);
+  static void NotifyAll(Thread* self, Object* obj);
+  static void Wait(Thread* self, Object* obj, int64_t ms, int32_t ns, bool interruptShouldThrow);
 
-  void Notify() {
-  }
+  static void SweepMonitorList(bool (isUnmarkedObject)(void*));
 
-  void NotifyAll() {
-  }
-
-  void Wait() {
-  }
-
-  void Wait(int64_t timeout) {
-    Wait(timeout, 0);
-  }
-
-  void Wait(int64_t timeout, int32_t nanos) {
-  }
+  static void FreeMonitorList();
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(Monitor);
+  explicit Monitor(Object* obj);
 
+  void AppendToWaitSet(Thread* thread);
+  void RemoveFromWaitSet(Thread* thread);
+
+  static void Inflate(Thread* self, Object* obj);
+
+  void Lock(Thread* self);
+  bool Unlock(Thread* thread);
+
+  void Notify(Thread* self);
+  void NotifyAll(Thread* self);
+
+  void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow);
+
+  /* Which thread currently owns the lock? */
+  Thread* owner_;
+
+  /* Owner's recursive lock depth */
+  int lock_count_;
+
+  /* What object are we part of (for debugging). */
+  Object* obj_;
+
+  /* Threads currently waiting on this monitor. */
+  Thread* wait_set_;
+
+  Mutex lock_;
+
+  Monitor* next_;
+
+  /*
+   * Who last acquired this monitor, when lock sampling is enabled.
+   * Even when enabled, owner_filename_ may be NULL.
+   */
+  const char* owner_filename_;
+  uint32_t owner_line_number_;
+
+  friend class Object;
 };
 
-class MonitorLock {
- public:
-
-  MonitorLock(Monitor* monitor) : monitor_(monitor) {
-    CHECK(monitor != NULL);
-    monitor_->Enter();
-  }
-
-  ~MonitorLock() {
-    monitor_->Exit();
-  }
-
-  void Wait(int64_t millis = 0) {
-    monitor_->Wait(millis);
-  }
-
-  void Notify() {
-    monitor_->Notify();
-  }
-
-  void NotifyAll() {
-    monitor_->NotifyAll();
-  }
-
- private:
-  Monitor* const monitor_;
-  DISALLOW_COPY_AND_ASSIGN(MonitorLock);
-};
+/*
+ * Relative timed wait on condition
+ */
+int dvmRelativeCondWait(pthread_cond_t* cond, pthread_mutex_t* mutex, int64_t msec, int32_t nsec);
 
 }  // namespace art
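
The LW_* macros above define the 32-bit lock word layout: bit 0 is the shape, bits 1-2 the hash state, and, for thin locks, the owner's thin lock id starts at bit 3 (the recursion count sits above it and is handled by the LW_LOCK_COUNT_* macros used in monitor.cc). A hedged sketch of composing and decoding a thin lock word with these macros; the thread id value is made up for illustration:

#include <cassert>
#include <cstdint>
// Assumes the LW_* macros from monitor.h are in scope.

void ThinLockWordExample() {
  const uint32_t thread_id = 7;  // hypothetical thin lock id
  // An unlocked, unhashed object has a lock word of 0 (thin shape, no owner).
  uint32_t word = 0;
  // The uncontended path in MonitorEnter installs the owner with a CAS on a
  // value composed like this:
  uint32_t locked = word | (thread_id << LW_LOCK_OWNER_SHIFT);
  assert(LW_SHAPE(locked) == LW_SHAPE_THIN);
  assert(LW_LOCK_OWNER(locked) == thread_id);
  assert(LW_HASH_STATE(locked) == LW_HASH_STATE_UNHASHED);
}
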
 
diff --git a/src/mutex.cc b/src/mutex.cc
index fcd16ee..27bb627 100644
--- a/src/mutex.cc
+++ b/src/mutex.cc
@@ -79,4 +79,37 @@
   return art::GetTid();
 }
 
+ConditionVariable::ConditionVariable(const std::string& name) : name_(name) {
+  CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, NULL));
+}
+
+ConditionVariable::~ConditionVariable() {
+  CHECK_MUTEX_CALL(pthread_cond_destroy, (&cond_));
+}
+
+void ConditionVariable::Broadcast() {
+  CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_));
+}
+
+void ConditionVariable::Signal() {
+  CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_));
+}
+
+void ConditionVariable::Wait(Mutex& mutex) {
+  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, mutex.GetImpl()));
+}
+
+void ConditionVariable::TimedWait(Mutex& mutex, const timespec& ts) {
+#ifdef HAVE_TIMEDWAIT_MONOTONIC
+#define TIMEDWAIT pthread_cond_timedwait_monotonic
+#else
+#define TIMEDWAIT pthread_cond_timedwait
+#endif
+  int rc = TIMEDWAIT(&cond_, mutex.GetImpl(), &ts);
+  if (rc != 0 && rc != ETIMEDOUT) {
+    errno = rc;
+    PLOG(FATAL) << "TimedWait failed for " << name_;
+  }
+}
+
 }  // namespace
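
ConditionVariable::TimedWait takes an absolute timespec, so callers such as Monitor::Wait (via ToAbsoluteTime, which is not part of this hunk) must first turn a relative (ms, ns) timeout into a deadline. A hedged sketch of that conversion; the real code's clock choice depends on HAVE_TIMEDWAIT_MONOTONIC, so the use of CLOCK_REALTIME here is an assumption:

#include <cstdint>
#include <ctime>

// Illustrative conversion of a relative (ms, ns) timeout into the absolute
// deadline expected by ConditionVariable::TimedWait. CLOCK_REALTIME is
// assumed; with HAVE_TIMEDWAIT_MONOTONIC the monotonic clock would be used.
static void RelativeToAbsoluteTime(int64_t ms, int32_t ns, timespec* ts) {
  clock_gettime(CLOCK_REALTIME, ts);
  int64_t nanos = ts->tv_nsec + ms * 1000000LL + ns;
  ts->tv_sec += nanos / 1000000000LL;
  ts->tv_nsec = static_cast<long>(nanos % 1000000000LL);
}
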
diff --git a/src/mutex.h b/src/mutex.h
index cbfd5a0..43194e9 100644
--- a/src/mutex.h
+++ b/src/mutex.h
@@ -82,6 +82,22 @@
   DISALLOW_COPY_AND_ASSIGN(MutexLock);
 };
 
+class ConditionVariable {
+ public:
+  ConditionVariable(const std::string& name);
+  ~ConditionVariable();
+
+  void Broadcast();
+  void Signal();
+  void Wait(Mutex& mutex);
+  void TimedWait(Mutex& mutex, const timespec& ts);
+
+ private:
+  pthread_cond_t cond_;
+  std::string name_;
+  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
 }  // namespace art
 
 #endif  // ART_SRC_MUTEX_H_
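
The ConditionVariable wrapper pairs with the existing Mutex/MutexLock types, and, as the SignalCatcher and ThreadList changes below show, waits are always done in a loop that re-checks the guarded predicate. A minimal sketch of that pattern against the same API; the class and member names here are illustrative, not from the tree:

#include <string>
// Assumes mutex.h (Mutex, MutexLock, ConditionVariable) is available.

class StartGate {
 public:
  StartGate() : lock_("StartGate lock"), cond_("StartGate::cond_"), ready_(false) {}

  // Blocks the calling thread until Open() has been called.
  void Await() {
    MutexLock mu(lock_);
    while (!ready_) {        // re-check after every wakeup; wakeups can be spurious
      cond_.Wait(lock_);
    }
  }

  // Releases every thread blocked in Await().
  void Open() {
    MutexLock mu(lock_);
    ready_ = true;
    cond_.Broadcast();
  }

 private:
  Mutex lock_;
  ConditionVariable cond_;
  bool ready_;
};
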
diff --git a/src/object.cc b/src/object.cc
index b5e66d9..c54364f 100644
--- a/src/object.cc
+++ b/src/object.cc
@@ -10,12 +10,13 @@
 
 #include "class_linker.h"
 #include "class_loader.h"
+#include "dex_cache.h"
+#include "dex_file.h"
 #include "globals.h"
 #include "heap.h"
 #include "intern_table.h"
 #include "logging.h"
-#include "dex_cache.h"
-#include "dex_file.h"
+#include "monitor.h"
 #include "runtime.h"
 
 namespace art {
@@ -25,6 +26,30 @@
   return GetClass() == GetClass()->GetDescriptor()->GetClass();
 }
 
+uint32_t Object::GetLockOwner() {
+  return Monitor::GetLockOwner(monitor_);
+}
+
+void Object::MonitorEnter(Thread* thread) {
+  Monitor::MonitorEnter(thread, this);
+}
+
+void Object::MonitorExit(Thread* thread) {
+  Monitor::MonitorExit(thread, this);
+}
+
+void Object::Notify() {
+  Monitor::Notify(Thread::Current(), this);
+}
+
+void Object::NotifyAll() {
+  Monitor::NotifyAll(Thread::Current(), this);
+}
+
+void Object::Wait(int64_t ms, int32_t ns) {
+  Monitor::Wait(Thread::Current(), this, ms, ns, true);
+}
+
 // TODO: get global references for these
 Class* Field::java_lang_reflect_Field_ = NULL;
 
@@ -741,8 +766,8 @@
 
 void Class::CanPutArrayElementFromCode(const Class* object_class, const Class* array_class) {
   if (!CanPutArrayElement(object_class, array_class)) {
-    LOG(ERROR) << "Can't put a " << PrettyDescriptor(object_class->GetDescriptor())
-               << " into a " << PrettyDescriptor(array_class->GetDescriptor());
+    LOG(ERROR) << "Can't put a " << PrettyClass(object_class)
+               << " into a " << PrettyClass(array_class);
     UNIMPLEMENTED(FATAL) << "need to throw ArrayStoreException and unwind stack";
   }
 }
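
With these forwarding methods, synchronization on an Object goes through Monitor's static entry points instead of a per-object Monitor*, and the old ObjectLock RAII helper is dropped from object.h below. A hedged sketch of the resulting enter/wait/notify pattern; the predicate being waited on is purely illustrative:

// Assumes object.h and thread.h are available. 'ProducerDone' is a
// hypothetical predicate standing in for whatever condition is awaited.
void WaitForProducer(Object* shared, bool (*ProducerDone)()) {
  Thread* self = Thread::Current();
  shared->MonitorEnter(self);   // thin lock; inflated on demand if contended
  while (!ProducerDone()) {
    shared->Wait(0, 0);         // releases the monitor while waiting, reacquires on wakeup
  }
  shared->MonitorExit(self);
}

void SignalProducerDone(Object* shared) {
  Thread* self = Thread::Current();
  shared->MonitorEnter(self);
  shared->NotifyAll();          // wakes every thread in the wait set
  shared->MonitorExit(self);
}
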
diff --git a/src/object.h b/src/object.h
index 5ecd995..59909e1 100644
--- a/src/object.h
+++ b/src/object.h
@@ -27,8 +27,6 @@
 #include "heap.h"
 #include "logging.h"
 #include "macros.h"
-#include "monitor.h"
-#include "monitor.h"
 #include "offsets.h"
 #include "runtime.h"
 #include "stringpiece.h"
@@ -204,8 +202,7 @@
   }
 
   Class* GetClass() const {
-    return
-        GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
+    return GetFieldObject<Class*>(OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
   }
 
   void SetClass(Class* new_klass);
@@ -223,45 +220,25 @@
     return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
   }
 
-  Monitor* GetMonitor() const {
-    return GetFieldPtr<Monitor*>(
-        OFFSET_OF_OBJECT_MEMBER(Object, monitor_), false);
+  volatile int32_t* GetRawLockWordAddress() {
+    byte* raw_addr = reinterpret_cast<byte*>(this) + OFFSET_OF_OBJECT_MEMBER(Object, monitor_).Int32Value();
+    int32_t* word_addr = reinterpret_cast<int32_t*>(raw_addr);
+    return const_cast<volatile int32_t*>(word_addr);
   }
 
-  void SetMonitor(Monitor* monitor) {
-    // TODO: threading - compare-and-set
-    SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), monitor, false);
-  }
+  uint32_t GetLockOwner();
 
-  void MonitorEnter(Thread* thread = NULL) {
-    // TODO: use thread to get lock id
-    GetMonitor()->Enter();
-  }
+  void MonitorEnter(Thread* thread);
 
-  void MonitorExit(Thread* thread = NULL) {
-    // TODO: use thread to get lock id
-    GetMonitor()->Exit();
-  }
+  void MonitorExit(Thread* thread);
 
-  void Notify() {
-    GetMonitor()->Notify();
-  }
+  void Notify();
 
-  void NotifyAll() {
-    GetMonitor()->NotifyAll();
-  }
+  void NotifyAll();
 
-  void Wait() {
-    GetMonitor()->Wait();
-  }
+  void Wait(int64_t timeout);
 
-  void Wait(int64_t timeout) {
-    GetMonitor()->Wait(timeout);
-  }
-
-  void Wait(int64_t timeout, int32_t nanos) {
-    GetMonitor()->Wait(timeout, nanos);
-  }
+  void Wait(int64_t timeout, int32_t nanos);
 
   bool IsClass() const;
 
@@ -433,40 +410,13 @@
  private:
   Class* klass_;
 
-  Monitor* monitor_;
+  uint32_t monitor_;
 
-  friend struct ObjectOffsets;  // for verifying offset information
+  friend class ImageWriter;  // for abusing monitor_ directly
+  friend class ObjectOffsets;  // for verifying offset information
   DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
 };
 
-class ObjectLock {
- public:
-  explicit ObjectLock(Object* object) : obj_(object) {
-    CHECK(object != NULL);
-    obj_->MonitorEnter();
-  }
-
-  ~ObjectLock() {
-    obj_->MonitorExit();
-  }
-
-  void Wait(int64_t millis = 0) {
-    return obj_->Wait(millis);
-  }
-
-  void Notify() {
-    obj_->Notify();
-  }
-
-  void NotifyAll() {
-    obj_->NotifyAll();
-  }
-
- private:
-  Object* obj_;
-  DISALLOW_COPY_AND_ASSIGN(ObjectLock);
-};
-
 // C++ mirror of java.lang.reflect.AccessibleObject
 class MANAGED AccessibleObject : public Object {
  private:
@@ -638,11 +588,11 @@
 
   ByteArray* GetRegisterMapData() const;
 
-  void SetRegisterMapData(ByteArray* new_data);
+  void SetRegisterMapData(ByteArray* data);
 
   ByteArray* GetRegisterMapHeader() const;
 
-  void SetRegisterMapHeader(ByteArray* new_header);
+  void SetRegisterMapHeader(ByteArray* header);
 
   String* GetShorty() const;
 
@@ -1517,18 +1467,16 @@
 
   void SetClassSize(size_t new_class_size) {
     DCHECK(new_class_size >= GetClassSize())
-            << " class=" << PrettyType(this)
+            << " class=" << PrettyTypeOf(this)
             << " new_class_size=" << new_class_size
             << " GetClassSize=" << GetClassSize();
-    SetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size,
-               false);
+    SetField32(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size, false);
   }
 
   size_t GetObjectSize() const {
     CHECK(!IsVariableSize());
     CHECK(sizeof(size_t) == sizeof(int32_t));
-    size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_),
-                               false);
+    size_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false);
     CHECK_GE(result, sizeof(Object));
     return result;
   }
@@ -2047,7 +1995,7 @@
   // Total class size; used when allocating storage on gc heap.
   size_t class_size_;
 
-  // threadId, used to check for recursive <clinit> invocation
+  // tid used to check for recursive <clinit> invocation
   pid_t clinit_thread_id_;
 
   // number of instance fields that are object refs
@@ -2551,9 +2499,9 @@
       OFFSET_OF_OBJECT_MEMBER(Method, register_map_data_), false);
 }
 
-inline void Method::SetRegisterMapData(ByteArray* new_data) {
+inline void Method::SetRegisterMapData(ByteArray* data) {
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Method, register_map_data_),
-                 new_data, false);
+                 data, false);
 }
 
 inline ByteArray* Method::GetRegisterMapHeader() const {
@@ -2561,9 +2509,9 @@
       OFFSET_OF_OBJECT_MEMBER(Method, register_map_header_), false);
 }
 
-inline void Method::SetRegisterMapHeader(ByteArray* new_header) {
+inline void Method::SetRegisterMapHeader(ByteArray* header) {
   SetFieldObject(OFFSET_OF_OBJECT_MEMBER(Method, register_map_header_),
-                 new_header, false);
+                 header, false);
 }
 
 inline String* Method::GetShorty() const {
diff --git a/src/reference_table.cc b/src/reference_table.cc
index df3daf2..1b20b52 100644
--- a/src/reference_table.cc
+++ b/src/reference_table.cc
@@ -108,7 +108,7 @@
     return;
   }
 
-  std::string className(PrettyType(obj));
+  std::string className(PrettyTypeOf(obj));
   if (obj->IsClass()) {
     // We're summarizing multiple instances, so using the exemplar
     // Class' type parameter here would be misleading.
@@ -165,7 +165,7 @@
       continue;
     }
 
-    std::string className(PrettyType(ref));
+    std::string className(PrettyTypeOf(ref));
 
     std::string extras;
     size_t elems = GetElementCount(ref);
diff --git a/src/signal_catcher.cc b/src/signal_catcher.cc
index d72002f..ecd48cd 100644
--- a/src/signal_catcher.cc
+++ b/src/signal_catcher.cc
@@ -30,18 +30,17 @@
 
 namespace art {
 
-SignalCatcher::SignalCatcher() : lock_("SignalCatcher lock"), thread_(NULL) {
+SignalCatcher::SignalCatcher()
+    : lock_("SignalCatcher lock"), cond_("SignalCatcher::cond_"), thread_(NULL) {
   SetHaltFlag(false);
 
   // Create a raw pthread; its start routine will attach to the runtime.
   CHECK_PTHREAD_CALL(pthread_create, (&pthread_, NULL, &Run, this), "signal catcher thread");
 
-  CHECK_PTHREAD_CALL(pthread_cond_init, (&cond_, NULL), "SignalCatcher::cond_");
   MutexLock mu(lock_);
   while (thread_ == NULL) {
-    CHECK_PTHREAD_CALL(pthread_cond_wait, (&cond_, lock_.GetImpl()), __FUNCTION__);
+    cond_.Wait(lock_);
   }
-  CHECK_PTHREAD_CALL(pthread_cond_destroy, (&cond_), "SignalCatcher::cond_");
 }
 
 SignalCatcher::~SignalCatcher() {
@@ -123,7 +122,7 @@
   {
     MutexLock mu(signal_catcher->lock_);
     signal_catcher->thread_ = Thread::Current();
-    CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&signal_catcher->cond_), __FUNCTION__);
+    signal_catcher->cond_.Broadcast();
   }
 
   // Set up mask with signals we want to handle.
diff --git a/src/signal_catcher.h b/src/signal_catcher.h
index 16cf9e4..123b38f 100644
--- a/src/signal_catcher.h
+++ b/src/signal_catcher.h
@@ -45,7 +45,7 @@
 
   mutable Mutex lock_;
   bool halt_;
-  pthread_cond_t cond_;
+  ConditionVariable cond_;
   pthread_t pthread_;
   Thread* thread_;
 };
diff --git a/src/thread.cc b/src/thread.cc
index 0da46b6..b11dbbd 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -831,6 +831,7 @@
 Thread::Thread()
     : peer_(NULL),
       wait_mutex_("Thread wait mutex"),
+      wait_cond_("Thread wait condition variable"),
       wait_monitor_(NULL),
       interrupted_(false),
       stack_end_(NULL),
@@ -846,7 +847,7 @@
 
 void MonitorExitVisitor(const Object* object, void*) {
   Object* entered_monitor = const_cast<Object*>(object);
-  entered_monitor->MonitorExit();
+  entered_monitor->MonitorExit(Thread::Current());
 }
 
 Thread::~Thread() {
@@ -875,11 +876,21 @@
 
   // Thread.join() is implemented as an Object.wait() on the Thread.lock
   // object. Signal anyone who is waiting.
-  //Object* lock = dvmGetFieldObject(self->threadObj, gDvm.offJavaLangThread_lock);
-  //dvmLockObject(self, lock);
-  //dvmObjectNotifyAll(self, lock);
-  //dvmUnlockObject(self, lock);
-  //lock = NULL;
+  if (peer_ != NULL) {
+    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+    Class* java_lang_Thread_class = class_linker->FindSystemClass("Ljava/lang/Thread;");
+    Class* java_lang_ThreadLock_class = class_linker->FindSystemClass("Ljava/lang/ThreadLock;");
+    Field* lock_field = java_lang_Thread_class->FindDeclaredInstanceField("lock", java_lang_ThreadLock_class);
+
+    Thread* self = Thread::Current();
+    Object* lock = lock_field->GetObject(peer_);
+    // This conditional is only needed for tests, where Thread.lock won't have been set.
+    if (lock != NULL) {
+      lock->MonitorEnter(self);
+      lock->NotifyAll();
+      lock->MonitorExit(self);
+    }
+  }
 
   delete jni_env_;
   jni_env_ = NULL;
@@ -1104,8 +1115,7 @@
     uint32_t native_pc = pc_trace->Get(i);
     Class* klass = method->GetDeclaringClass();
     const DexFile& dex_file = class_linker->FindDexFile(klass->GetDexCache());
-    String* readable_descriptor = String::AllocFromModifiedUtf8(
-        PrettyDescriptor(klass->GetDescriptor()).c_str());
+    String* readable_descriptor = String::AllocFromModifiedUtf8(PrettyClass(klass).c_str());
 
     // Allocate element, potentially triggering GC
     StackTraceElement* obj =
@@ -1233,6 +1243,13 @@
   return result;
 }
 
+bool Thread::HoldsLock(Object* object) {
+  if (object == NULL) {
+    return false;
+  }
+  return object->GetLockOwner() == thin_lock_id_;
+}
+
 void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
   if (exception_ != NULL) {
     visitor(exception_, arg);
diff --git a/src/thread.h b/src/thread.h
index 2e7b615..c362a43 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -267,6 +267,8 @@
 
   void WaitUntilSuspended();
 
+  bool HoldsLock(Object*);
+
   /*
    * Changes the priority of this thread to match that of the java.lang.Thread object.
    *
@@ -400,6 +402,20 @@
     return interrupted_;
   }
 
+  void Interrupt() {
+    MutexLock mu(wait_mutex_);
+    if (interrupted_) {
+      return;
+    }
+    interrupted_ = true;
+    NotifyLocked();
+  }
+
+  void Notify() {
+    MutexLock mu(wait_mutex_);
+    NotifyLocked();
+  }
+
   void RegisterExceptionEntryPoint(void (*handler)(Method**)) {
     exception_entry_point_ = handler;
   }
@@ -454,7 +470,7 @@
     return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
   }
 
-  static ThreadOffset IdOffset() {
+  static ThreadOffset ThinLockIdOffset() {
     return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
   }
 
@@ -517,6 +533,12 @@
   void InitFunctionPointers();
   void InitStackHwm();
 
+  void NotifyLocked() {
+    if (wait_monitor_ != NULL) {
+      wait_cond_.Signal();
+    }
+  }
+
   static void ThreadExitCallback(void* arg);
 
   void WalkStack(StackVisitor* visitor) const;
@@ -541,10 +563,15 @@
 
   // Guards the 'interrupted_' and 'wait_monitor_' members.
   mutable Mutex wait_mutex_;
+  ConditionVariable wait_cond_;
   // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
   Monitor* wait_monitor_;
   // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
   bool interrupted_;
+  // The next thread in the wait set this thread is part of.
+  Thread* wait_next_;
+
+  friend class Monitor;
 
   // FIXME: placeholder for the gc cardTable
   uint32_t card_table_;
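
Thread::HoldsLock (added in thread.cc above) compares the object's lock-word owner against the caller's thin lock id, giving runtime code a cheap ownership assertion. A hedged sketch of its intended use as a precondition check; the guarded function is illustrative:

// Hypothetical helper that must only be called with 'guard' locked by the
// calling thread; HoldsLock makes that precondition checkable in debug builds.
void MutateGuardedState(Object* guard) {
  DCHECK(Thread::Current()->HoldsLock(guard));
  // ... mutate the state protected by 'guard' ...
}
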
diff --git a/src/thread_list.cc b/src/thread_list.cc
index 63cbf40..7b743ee 100644
--- a/src/thread_list.cc
+++ b/src/thread_list.cc
@@ -20,9 +20,9 @@
 
 ThreadList::ThreadList()
     : thread_list_lock_("thread list lock"),
-      thread_suspend_count_lock_("thread suspend count lock") {
-  CHECK_PTHREAD_CALL(pthread_cond_init, (&thread_start_cond_, NULL), "thread_start_cond_");
-  CHECK_PTHREAD_CALL(pthread_cond_init, (&thread_suspend_count_cond_, NULL), "thread_suspend_count_cond_");
+      thread_start_cond_("thread_start_cond_"),
+      thread_suspend_count_lock_("thread suspend count lock"),
+      thread_suspend_count_cond_("thread_suspend_count_cond_") {
 }
 
 ThreadList::~ThreadList() {
@@ -30,9 +30,6 @@
     Runtime::Current()->DetachCurrentThread();
   }
 
-  CHECK_PTHREAD_CALL(pthread_cond_destroy, (&thread_start_cond_), "thread_start_cond_");
-  CHECK_PTHREAD_CALL(pthread_cond_destroy, (&thread_suspend_count_cond_), "thread_suspend_count_cond_");
-
   // All threads should have exited and unregistered when we
   // reach this point. This means that all daemon threads had been
   // shutdown cleanly.
@@ -71,7 +68,7 @@
        * and re-acquiring the lock provides the memory barriers we
        * need for correct behavior on SMP.
        */
-      CHECK_PTHREAD_CALL(pthread_cond_wait, (&thread_suspend_count_cond_, thread_suspend_count_lock_.GetImpl()), __FUNCTION__);
+      thread_suspend_count_cond_.Wait(thread_suspend_count_lock_);
     }
     CHECK_EQ(thread->suspend_count_, 0);
   }
@@ -152,7 +149,7 @@
   {
     //LOG(INFO) << *self << " ResumeAll waking others";
     MutexLock mu(thread_suspend_count_lock_);
-    CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&thread_suspend_count_cond_), "thread_suspend_count_cond_");
+    thread_suspend_count_cond_.Broadcast();
   }
 
   //LOG(INFO) << *self << " ResumeAll complete";
@@ -212,7 +209,7 @@
 
     // We wait for the child to tell us that it's in the thread list.
     while (child->GetState() != Thread::kStarting) {
-      CHECK_PTHREAD_CALL(pthread_cond_wait, (&thread_start_cond_, thread_list_lock_.GetImpl()), __FUNCTION__);
+      thread_start_cond_.Wait(thread_list_lock_);
     }
   }
 
@@ -222,7 +219,7 @@
 
   // Tell the child that it's safe: it will see any future suspend request.
   child->SetState(Thread::kVmWait);
-  CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&thread_start_cond_), __FUNCTION__);
+  thread_start_cond_.Broadcast();
 }
 
 void ThreadList::WaitForGo() {
@@ -233,12 +230,12 @@
 
   // Tell our parent that we're in the thread list.
   self->SetState(Thread::kStarting);
-  CHECK_PTHREAD_CALL(pthread_cond_broadcast, (&thread_start_cond_), __FUNCTION__);
+  thread_start_cond_.Broadcast();
 
   // Wait until our parent tells us there's no suspend still pending
   // from before we were on the thread list.
   while (self->GetState() != Thread::kVmWait) {
-    CHECK_PTHREAD_CALL(pthread_cond_wait, (&thread_start_cond_, thread_list_lock_.GetImpl()), __FUNCTION__);
+    thread_start_cond_.Wait(thread_list_lock_);
   }
 
   // Enter the runnable state. We know that any pending suspend will affect us now.
diff --git a/src/thread_list.h b/src/thread_list.h
index 5630b29..aa1415a 100644
--- a/src/thread_list.h
+++ b/src/thread_list.h
@@ -59,12 +59,12 @@
   std::bitset<kMaxThreadId> allocated_ids_;
   std::list<Thread*> list_;
 
-  pthread_cond_t thread_start_cond_;
+  ConditionVariable thread_start_cond_;
 
   // This lock guards every thread's suspend_count_ field...
   mutable Mutex thread_suspend_count_lock_;
   // ...and is used in conjunction with this condition variable.
-  pthread_cond_t thread_suspend_count_cond_;
+  ConditionVariable thread_suspend_count_cond_;
 
   friend class Thread;
   friend class ThreadListLock;
diff --git a/src/utils.cc b/src/utils.cc
index d146166..08ab705 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -97,11 +97,16 @@
   return result;
 }
 
-std::string PrettyField(const Field* f) {
+std::string PrettyField(const Field* f, bool with_type) {
   if (f == NULL) {
     return "null";
   }
-  std::string result(PrettyDescriptor(f->GetDeclaringClass()->GetDescriptor()));
+  std::string result;
+  if (with_type) {
+    result += PrettyDescriptor(f->GetType()->GetDescriptor());
+    result += ' ';
+  }
+  result += PrettyDescriptor(f->GetDeclaringClass()->GetDescriptor());
   result += '.';
   result += f->GetName()->ToModifiedUtf8();
   return result;
@@ -123,7 +128,7 @@
   return result;
 }
 
-std::string PrettyType(const Object* obj) {
+std::string PrettyTypeOf(const Object* obj) {
   if (obj == NULL) {
     return "null";
   }
@@ -137,6 +142,17 @@
   return result;
 }
 
+std::string PrettyClass(const Class* c) {
+  if (c == NULL) {
+    return "null";
+  }
+  std::string result;
+  result += "java.lang.Class<";
+  result += PrettyDescriptor(c->GetDescriptor());
+  result += ">";
+  return result;
+}
+
 std::string MangleForJni(const std::string& s) {
   std::string result;
   size_t char_count = CountModifiedUtf8Chars(s.c_str());
diff --git a/src/utils.h b/src/utils.h
index e6ed1d2..fc58617 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -14,6 +14,7 @@
 
 namespace art {
 
+class Class;
 class Field;
 class Method;
 class Object;
@@ -142,13 +143,16 @@
   return result;
 }
 
+// Used to implement PrettyClass, PrettyField, PrettyMethod, and PrettyTypeOf,
+// one of which is probably more useful to you.
 // Returns a human-readable equivalent of 'descriptor'. So "I" would be "int",
 // "[[I" would be "int[][]", "[Ljava/lang/String;" would be
 // "java.lang.String[]", and so forth.
 std::string PrettyDescriptor(const String* descriptor);
 
-// Returns a human-readable signature for 'f'. Something like "a.b.C.f".
-std::string PrettyField(const Field* f);
+// Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
+// "int a.b.C.f" (depending on the value of 'with_type').
+std::string PrettyField(const Field* f, bool with_type = true);
 
 // Returns a human-readable signature for 'm'. Something like "a.b.C.m" or
 // "a.b.C.m(II)V" (depending on the value of 'with_signature').
@@ -158,7 +162,11 @@
 // So given an instance of java.lang.String, the output would
 // be "java.lang.String". Given an array of int, the output would be "int[]".
 // Given String.class, the output would be "java.lang.Class<java.lang.String>".
-std::string PrettyType(const Object* obj);
+std::string PrettyTypeOf(const Object* obj);
+
+// Returns a human-readable form of the name of the given class.
+// Given String.class, the output would be "java.lang.Class<java.lang.String>".
+std::string PrettyClass(const Class* c);
 
 // Performs JNI name mangling as described in section 11.3 "Linking Native Methods"
 // of the JNI spec.
diff --git a/src/utils_test.cc b/src/utils_test.cc
index 21167d6..0869b89 100644
--- a/src/utils_test.cc
+++ b/src/utils_test.cc
@@ -56,20 +56,44 @@
   EXPECT_DESCRIPTOR("short", "S");
 }
 
-TEST_F(UtilsTest, PrettyType) {
-  EXPECT_EQ("null", PrettyType(NULL));
+TEST_F(UtilsTest, PrettyTypeOf) {
+  EXPECT_EQ("null", PrettyTypeOf(NULL));
 
   String* s = String::AllocFromModifiedUtf8("");
-  EXPECT_EQ("java.lang.String", PrettyType(s));
+  EXPECT_EQ("java.lang.String", PrettyTypeOf(s));
 
   ShortArray* a = ShortArray::Alloc(2);
-  EXPECT_EQ("short[]", PrettyType(a));
+  EXPECT_EQ("short[]", PrettyTypeOf(a));
 
   Class* c = class_linker_->FindSystemClass("[Ljava/lang/String;");
   ASSERT_TRUE(c != NULL);
   Object* o = ObjectArray<String>::Alloc(c, 0);
-  EXPECT_EQ("java.lang.String[]", PrettyType(o));
-  EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyType(o->GetClass()));
+  EXPECT_EQ("java.lang.String[]", PrettyTypeOf(o));
+  EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyTypeOf(o->GetClass()));
+}
+
+TEST_F(UtilsTest, PrettyClass) {
+  EXPECT_EQ("null", PrettyClass(NULL));
+  Class* c = class_linker_->FindSystemClass("[Ljava/lang/String;");
+  ASSERT_TRUE(c != NULL);
+  Object* o = ObjectArray<String>::Alloc(c, 0);
+  EXPECT_EQ("java.lang.Class<java.lang.String[]>", PrettyClass(o->GetClass()));
+}
+
+TEST_F(UtilsTest, PrettyField) {
+  EXPECT_EQ("null", PrettyField(NULL));
+
+  Class* java_lang_String = class_linker_->FindSystemClass("Ljava/lang/String;");
+  Class* int_class = class_linker_->FindPrimitiveClass('I');
+  Class* char_array_class = class_linker_->FindSystemClass("[C");
+
+  Field* f;
+  f = java_lang_String->FindDeclaredInstanceField("count", int_class);
+  EXPECT_EQ("int java.lang.String.count", PrettyField(f));
+  EXPECT_EQ("java.lang.String.count", PrettyField(f, false));
+  f = java_lang_String->FindDeclaredInstanceField("value", char_array_class);
+  EXPECT_EQ("char[] java.lang.String.value", PrettyField(f));
+  EXPECT_EQ("java.lang.String.value", PrettyField(f, false));
 }
 
 TEST_F(UtilsTest, MangleForJni) {