Merge "Primitive type mismatches are now hard failures in the verifier." into dalvik-dev
diff --git a/src/compiler/codegen/MethodBitcode.cc b/src/compiler/codegen/MethodBitcode.cc
index 864372a..a8da1cd 100644
--- a/src/compiler/codegen/MethodBitcode.cc
+++ b/src/compiler/codegen/MethodBitcode.cc
@@ -2279,7 +2279,10 @@
     Instruction::Code dalvikOp = getDalvikOpcode(op, false, rlDest.wide);
     RegLocation rlSrc2;
     if (constRhs != NULL) {
-      DCHECK_EQ(dalvikOp, Instruction::NOT_LONG);
+      // ir_builder converts NOT_LONG to xor src, -1.  Restore the original opcode here
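+      // (the rewrite is sound since ~x == x ^ -1 on two's-complement longs).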
+      DCHECK_EQ(dalvikOp, Instruction::XOR_LONG);
+      DCHECK_EQ(-1L, constRhs->getSExtValue());
+      dalvikOp = Instruction::NOT_LONG;
       rlSrc2 = rlSrc1;
     } else {
       rlSrc2 = getLoc(cUnit, rhs);
diff --git a/src/globals.h b/src/globals.h
index 0efa7eb..1eeaca2 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -36,6 +36,7 @@
 const int kBitsPerByte = 8;
 const int kBitsPerByteLog2 = 3;
 const int kBitsPerWord = kWordSize * kBitsPerByte;
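+// A word with only its most-significant bit set, e.g. 0x80000000 when kBitsPerWord is 32.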
+const uintptr_t kWordHighBitMask = static_cast<uintptr_t>(1) << (kBitsPerWord - 1);
 
 // Required stack alignment
 const int kStackAlignment = 16;
diff --git a/src/heap.cc b/src/heap.cc
index a96472b..626adf9 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -393,13 +393,15 @@
 
 #if VERIFY_OBJECT_ENABLED
 void Heap::VerifyObject(const Object* obj) {
-  if (this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
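+  // obj may legitimately be NULL; VerifyObjectLocked below assumes a non-NULL object.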
+  if (obj == NULL || this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
       Thread::Current() == NULL ||
       Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
     return;
   }
-  ScopedHeapLock heap_lock;
-  Heap::VerifyObjectLocked(obj);
+  {
+    ScopedHeapLock heap_lock;
+    Heap::VerifyObjectLocked(obj);
+  }
 }
 #endif
 
@@ -412,39 +414,39 @@
 
 void Heap::VerifyObjectLocked(const Object* obj) {
   lock_->AssertHeld();
-  if (obj != NULL) {
-    if (!IsAligned<kObjectAlignment>(obj)) {
-      LOG(FATAL) << "Object isn't aligned: " << obj;
-    } else if (!GetLiveBitmap()->Test(obj)) {
-      Space* space = FindSpaceFromObject(obj);
-      if (space == NULL) {
-        DumpSpaces();
-        LOG(FATAL) << "Object " << obj << " is not contained in any space";
-      }
-      LOG(FATAL) << "Object is dead: " << obj << " in space " << *space;
+  if (!IsAligned<kObjectAlignment>(obj)) {
+    LOG(FATAL) << "Object isn't aligned: " << obj;
+  } else if (!GetLiveBitmap()->Test(obj)) {
+    Space* space = FindSpaceFromObject(obj);
+    if (space == NULL) {
+      DumpSpaces();
+      LOG(FATAL) << "Object " << obj << " is not contained in any space";
     }
-    // Ignore early dawn of the universe verifications
-    if (num_objects_allocated_ > 10) {
-      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
-          Object::ClassOffset().Int32Value();
-      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
-      if (c == NULL) {
-        LOG(FATAL) << "Null class in object: " << obj;
-      } else if (!IsAligned<kObjectAlignment>(c)) {
-        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
-      } else if (!GetLiveBitmap()->Test(c)) {
-        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
-      }
-      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
-      // Note: we don't use the accessors here as they have internal sanity checks
-      // that we don't want to run
-      raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
-      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
-      raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
-      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
-      CHECK_EQ(c_c, c_c_c);
-    }
+    LOG(FATAL) << "Object is dead: " << obj << " in space " << *space;
   }
+#if !VERIFY_OBJECT_FAST
+  // Ignore early dawn of the universe verifications
+  if (num_objects_allocated_ > 10) {
+    const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
+        Object::ClassOffset().Int32Value();
+    const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
+    if (c == NULL) {
+      LOG(FATAL) << "Null class in object: " << obj;
+    } else if (!IsAligned<kObjectAlignment>(c)) {
+      LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
+    } else if (!GetLiveBitmap()->Test(c)) {
+      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
+    }
+    // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
+    // Note: we don't use the accessors here as they have internal sanity checks
+    // that we don't want to run
+    raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
+    const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
+    raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
+    const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
+    CHECK_EQ(c_c, c_c_c);
+  }
+#endif
 }
 
 void Heap::VerificationCallback(Object* obj, void* arg) {
diff --git a/src/heap.h b/src/heap.h
index a1b1bd9..e908248 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -31,6 +31,9 @@
 
 #define VERIFY_OBJECT_ENABLED 0
 
+// Fast verification means we do not verify the classes of objects.
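+// When enabled, VerifyObjectLocked checks only alignment and liveness, skipping the
+// class checks guarded by !VERIFY_OBJECT_FAST in heap.cc.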
+#define VERIFY_OBJECT_FAST 1
+
 namespace art {
 
 class AllocSpace;
diff --git a/src/space.cc b/src/space.cc
index 24eca26..02230e1 100644
--- a/src/space.cc
+++ b/src/space.cc
@@ -206,7 +206,7 @@
   growth_limit_ = RoundUp(size, kPageSize);
   // FIXME: Do we need reference counted pointers here?
   // Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
-  VLOG(heap) << "Creating new alloc space: ";
+  VLOG(heap) << "Creating new AllocSpace: ";
   VLOG(heap) << "Size " << mem_map_->Size();
   VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
   VLOG(heap) << "Capacity " << PrettySize(capacity);
@@ -218,8 +218,10 @@
     CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
   }
   AllocSpace* alloc_space = new AllocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
-  live_bitmap_->Trim(Capacity()); // TODO - kPageSize?
-  mark_bitmap_->Trim(Capacity()); // TODO - kPageSize?
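+  // This space now ends at end_, so clamp its bitmaps to match; the new AllocSpace takes
+  // over from end_.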
+  live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(end_));
+  CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(end_));
+  mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(end_));
+  CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(end_));
   name_ += "-zygote-transformed";
   VLOG(heap) << "zygote space creation done";
   return alloc_space;
diff --git a/src/space_bitmap.cc b/src/space_bitmap.cc
index 74bc07c..7da8146 100644
--- a/src/space_bitmap.cc
+++ b/src/space_bitmap.cc
@@ -38,8 +38,9 @@
 // Clean up any resources associated with the bitmap.
 SpaceBitmap::~SpaceBitmap() {}
 
-void SpaceBitmap::Trim(size_t heap_capacity) {
-  size_t new_size = OffsetToIndex(RoundUp(heap_capacity, kAlignment * kBitsPerWord)) * kWordSize;
+void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
+  DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
+  size_t new_size = OffsetToIndex(new_end - heap_begin_) * kWordSize;
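+  // Only ever shrink the logical size; the underlying storage is left untouched.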
   if (new_size < bitmap_size_) {
     bitmap_size_ = new_size;
   }
@@ -84,13 +85,12 @@
   for (uintptr_t i = 0; i <= end; ++i) {
     word w = bitmap_begin_[i];
     if (UNLIKELY(w != 0)) {
-      word high_bit = 1 << (kBitsPerWord - 1);
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       while (w != 0) {
-        const int shift = CLZ(w);
+        const size_t shift = CLZ(w);
         Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
         (*callback)(obj, arg);
-        w &= ~(high_bit >> shift);
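+        // CLZ found the most-significant set bit; xor with (high bit >> shift) clears just that bit.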
+        w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
       }
     }
   }
@@ -130,14 +130,13 @@
     for (size_t i = start; i <= end; i++) {
       word w = bitmap_begin_[i];
       if (UNLIKELY(w != 0)) {
-        word high_bit = 1 << (kBitsPerWord - 1);
         uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
         void* finger = reinterpret_cast<void*>(IndexToOffset(i + 1) + heap_begin_);
         while (w != 0) {
-          const int shift = CLZ(w);
+          const size_t shift = CLZ(w);
           Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
           (*callback)(obj, finger, arg);
-          w &= ~(high_bit >> shift);
+          w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
         }
       }
     }
@@ -146,14 +145,13 @@
     for (size_t i = start; i <= end; i++) {
       word w = bitmap_begin_[i];
       if (UNLIKELY(w != 0)) {
-        word high_bit = 1 << (kBitsPerWord - 1);
         uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
         void* finger = reinterpret_cast<void*>(IndexToOffset(i + 1) + heap_begin_);
         while (w != 0) {
-          const int shift = CLZ(w);
+          const size_t shift = CLZ(w);
           Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
           (*callback)(obj, finger, arg);
-          w &= ~(high_bit >> shift);
+          w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
         }
       }
       // update 'end' in case callback modified bitmap
@@ -194,11 +192,10 @@
   for (size_t i = start; i <= end; i++) {
     word garbage = live[i] & ~mark[i];
     if (UNLIKELY(garbage != 0)) {
-      word high_bit = 1 << (kBitsPerWord - 1);
       uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
       while (garbage != 0) {
-        int shift = CLZ(garbage);
-        garbage &= ~(high_bit >> shift);
+        const size_t shift = CLZ(garbage);
+        garbage ^= static_cast<size_t>(kWordHighBitMask) >> shift;
         *pb++ = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
       }
       // Make sure that there are always enough slots available for an
@@ -302,13 +299,12 @@
   for (uintptr_t i = 0; i <= end; ++i) {
     word w = bitmap_begin_[i];
     if (UNLIKELY(w != 0)) {
-      word high_bit = 1 << (kBitsPerWord - 1);
       uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
       while (w != 0) {
-        const int shift = CLZ(w);
+        const size_t shift = CLZ(w);
         Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
         WalkFieldsInOrder(visited.get(), callback, obj, arg);
-        w &= ~(high_bit >> shift);
+        w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
       }
     }
   }
diff --git a/src/space_bitmap.h b/src/space_bitmap.h
index 1e8a9a7..adf1996 100644
--- a/src/space_bitmap.h
+++ b/src/space_bitmap.h
@@ -60,7 +60,7 @@
 
   // Pack the bits in backwards so they come out in address order when using CLZ.
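+  // For example, offset 0 maps to the most-significant bit, so CLZ visits the lowest address first.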
   static word OffsetToMask(uintptr_t offset_) {
-    return 1 << (kBitsPerWord - 1 - (offset_ / kAlignment) % kBitsPerWord);
+    return static_cast<uintptr_t>(kWordHighBitMask) >> ((offset_ / kAlignment) % kBitsPerWord);
   }
 
   inline void Set(const Object* obj) {
@@ -119,8 +119,7 @@
 
     size_t word_start = bit_index_start / kBitsPerWord;
     size_t word_end = bit_index_end / kBitsPerWord;
-
-    const size_t high_bit = 1 << (kBitsPerWord - 1);
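+    // The loop below may read the word at index word_end, which must lie within the bitmap.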
+    DCHECK_LT(word_end * kWordSize, Size());
 
     // Trim off left_bits of left bits.
     size_t edge_word = bitmap_begin_[word_start];
@@ -138,7 +137,7 @@
         const size_t shift = CLZ(edge_word);
         Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
         visitor(obj);
-        edge_word &= ~(high_bit >> shift);
+        edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
       } while (edge_word != 0);
     }
     word_start++;
@@ -151,7 +150,7 @@
           const size_t shift = CLZ(w);
           Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
           visitor(obj);
-          w &= ~(high_bit >> shift);
+          w ^= static_cast<size_t>(kWordHighBitMask) >> shift;
         } while (w != 0);
       }
     }
@@ -169,10 +168,10 @@
     edge_word &= ~((1 << trim_bits) - 1);
     uintptr_t ptr_base = IndexToOffset(word_end) + heap_begin_;
     while (edge_word != 0) {
-      const int shift = CLZ(edge_word);
+      const size_t shift = CLZ(edge_word);
       Object* obj = reinterpret_cast<Object*>(ptr_base + shift * kAlignment);
       visitor(obj);
-      edge_word &= ~(high_bit >> shift);
+      edge_word ^= static_cast<size_t>(kWordHighBitMask) >> shift;
     }
   }
 
@@ -202,11 +201,17 @@
     return IndexToOffset(Size() / kWordSize);
   }
 
-  uintptr_t HeapBegin() {
+  uintptr_t HeapBegin() const {
     return heap_begin_;
   }
 
-  void Trim(size_t heap_capcity);
+  // The exclusive upper bound on the addresses the bitmap can span (HeapBegin() <= object < HeapLimit()).
+  uintptr_t HeapLimit() const {
+    return HeapBegin() + static_cast<uintptr_t>(HeapSize());
+  }
+
+  // Set the maximum address (exclusive) that can be covered by the bitmap.
+  void SetHeapLimit(uintptr_t new_end);
 
  private:
   // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
diff --git a/src/space_test.cc b/src/space_test.cc
index f377a61..c1c1dca 100644
--- a/src/space_test.cc
+++ b/src/space_test.cc
@@ -70,6 +70,10 @@
   }
 }
 
+// TODO: This test is not very good; we should improve it.
+// It should do more allocations before the ZygoteSpace is created, do further allocations
+// afterwards, and run some GCs to ensure that the GC works with the ZygoteSpace.
 TEST_F(SpaceTest, ZygoteSpace) {
     AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
     ASSERT_TRUE(space != NULL);