Merge "Add --64 option for run-all-tests."
diff --git a/runtime/Android.mk b/runtime/Android.mk
index bc971a9..72f1774 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -61,6 +61,7 @@
 	gc/collector/sticky_mark_sweep.cc \
 	gc/gc_cause.cc \
 	gc/heap.cc \
+	gc/reference_processor.cc \
 	gc/reference_queue.cc \
 	gc/space/bump_pointer_space.cc \
 	gc/space/dlmalloc_space.cc \
@@ -114,6 +115,7 @@
 	native/java_lang_Thread.cc \
 	native/java_lang_Throwable.cc \
 	native/java_lang_VMClassLoader.cc \
+	native/java_lang_ref_Reference.cc \
 	native/java_lang_reflect_Array.cc \
 	native/java_lang_reflect_Constructor.cc \
 	native/java_lang_reflect_Field.cc \
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index d7c88ba..909bd3e 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -86,7 +86,13 @@
     // Symbols.
 #if !defined(__APPLE__)
     #define SYMBOL(name) name
-    #define PLT_SYMBOL(name) name  // ## @PLT  // TODO: Disabled for old clang 3.3
+    #if defined(__clang__) && (__clang_major__ < 4) && (__clang_minor__ < 5)
+        // TODO: Disabled for old clang 3.3; this leads to text relocations and there should be a
+        // better fix.
+        #define PLT_SYMBOL(name) name // ## @PLT
+    #else
+        #define PLT_SYMBOL(name) name ## @PLT
+    #endif
 #else
     // Mac OS' symbols have an _ prefix.
     #define SYMBOL(name) _ ## name
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index b8051c9..ff2eda0 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -31,6 +31,7 @@
 #include "gc/accounting/mod_union_table.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
+#include "gc/reference_processor.h"
 #include "gc/space/image_space.h"
 #include "gc/space/large_object_space.h"
 #include "gc/space/space-inl.h"
@@ -166,18 +167,9 @@
 void MarkSweep::ProcessReferences(Thread* self) {
   TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &IsMarkedCallback,
-                               &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
-}
-
-void MarkSweep::PreProcessReferences() {
-  if (IsConcurrent()) {
-    // No reason to do this for non-concurrent GC since pre processing soft references only helps
-    // pauses.
-    timings_.NewSplit("PreProcessReferences");
-    GetHeap()->ProcessSoftReferences(timings_, clear_soft_references_, &IsMarkedCallback,
-                                     &MarkObjectCallback, &ProcessMarkStackPausedCallback, this);
-  }
+  GetHeap()->GetReferenceProcessor()->ProcessReferences(
+      true, &timings_, clear_soft_references_, &IsMarkedCallback, &MarkObjectCallback,
+      &ProcessMarkStackCallback, this);
 }
 
 void MarkSweep::PausePhase() {
@@ -192,7 +184,6 @@
     // Scan dirty objects, this is only required if we are not doing concurrent GC.
     RecursiveMarkDirtyObjects(true, accounting::CardTable::kCardDirty);
   }
-  ProcessReferences(self);
   {
     TimingLogger::ScopedSplit split("SwapStacks", &timings_);
     WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -210,6 +201,9 @@
   // incorrectly sweep it. This also fixes a race where interning may attempt to return a strong
   // reference to a string that is about to be swept.
   Runtime::Current()->DisallowNewSystemWeaks();
+  // Enable the reference processing slow path; this needs to be done with mutators paused since
+  // there is no lock in the GetReferent fast path.
+  GetHeap()->GetReferenceProcessor()->EnableSlowPath();
 }
 
 void MarkSweep::PreCleanCards() {
@@ -265,7 +259,6 @@
   MarkReachableObjects();
   // Pre-clean dirtied cards to reduce pauses.
   PreCleanCards();
-  PreProcessReferences();
 }
 
 void MarkSweep::UpdateAndMarkModUnion() {
@@ -290,6 +283,8 @@
 void MarkSweep::ReclaimPhase() {
   TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
   Thread* self = Thread::Current();
+  // Process the references concurrently.
+  ProcessReferences(self);
   SweepSystemWeaks(self);
   Runtime::Current()->AllowNewSystemWeaks();
   {
@@ -1168,7 +1163,7 @@
   if (kCountJavaLangRefs) {
     ++reference_count_;
   }
-  heap_->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
+  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, IsMarkedCallback, this);
 }
 
 class MarkObjectVisitor {
@@ -1198,8 +1193,8 @@
   ScanObjectVisit(obj, mark_visitor, ref_visitor);
 }
 
-void MarkSweep::ProcessMarkStackPausedCallback(void* arg) {
-  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(true);
+void MarkSweep::ProcessMarkStackCallback(void* arg) {
+  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
 }
 
 void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index bfc70d1..3ebc0af 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -123,10 +123,6 @@
   void ProcessReferences(Thread* self)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void PreProcessReferences()
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   // Update and mark references from immune spaces.
   void UpdateAndMarkModUnion()
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -191,8 +187,9 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  static void ProcessMarkStackPausedCallback(void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+  static void ProcessMarkStackCallback(void* arg)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static void MarkRootParallelCallback(mirror::Object** root, void* arg, uint32_t thread_id,
                                        RootType root_type)
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index a0659e7..a406f6d 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -30,6 +30,7 @@
 #include "gc/accounting/remembered_set.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
+#include "gc/reference_processor.h"
 #include "gc/space/bump_pointer_space.h"
 #include "gc/space/bump_pointer_space-inl.h"
 #include "gc/space/image_space.h"
@@ -162,8 +163,9 @@
 void SemiSpace::ProcessReferences(Thread* self) {
   TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
-                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
+  GetHeap()->GetReferenceProcessor()->ProcessReferences(
+      false, &timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
+      &MarkObjectCallback, &ProcessMarkStackCallback, this);
 }
 
 void SemiSpace::MarkingPhase() {
@@ -694,7 +696,8 @@
 // Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
 void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
-  heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this);
+  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
+                                                         MarkedForwardingAddressCallback, this);
 }
 
 class SemiSpaceMarkObjectVisitor {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5d38b02..f2919e8 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -39,6 +39,7 @@
 #include "gc/collector/partial_mark_sweep.h"
 #include "gc/collector/semi_space.h"
 #include "gc/collector/sticky_mark_sweep.h"
+#include "gc/reference_processor.h"
 #include "gc/space/bump_pointer_space.h"
 #include "gc/space/dlmalloc_space-inl.h"
 #include "gc/space/image_space.h"
@@ -175,7 +176,7 @@
     large_object_threshold_ = kDefaultLargeObjectThreshold;
     // Background compaction is currently not supported for command line runs.
     if (background_collector_type_ != foreground_collector_type_) {
-      LOG(WARNING) << "Disabling background compaction for non zygote";
+      VLOG(heap) << "Disabling background compaction for non zygote";
       background_collector_type_ = foreground_collector_type_;
     }
   }
@@ -771,102 +772,6 @@
   return FindDiscontinuousSpaceFromObject(obj, true);
 }
 
-struct SoftReferenceArgs {
-  IsMarkedCallback* is_marked_callback_;
-  MarkObjectCallback* mark_callback_;
-  void* arg_;
-};
-
-mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
-  SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg);
-  // TODO: Not preserve all soft references.
-  return args->mark_callback_(obj, args->arg_);
-}
-
-void Heap::ProcessSoftReferences(TimingLogger& timings, bool clear_soft,
-                                 IsMarkedCallback* is_marked_callback,
-                                 MarkObjectCallback* mark_object_callback,
-                                 ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
-  // Unless required to clear soft references with white references, preserve some white referents.
-  if (!clear_soft) {
-    // Don't clear for sticky GC.
-    SoftReferenceArgs soft_reference_args;
-    soft_reference_args.is_marked_callback_ = is_marked_callback;
-    soft_reference_args.mark_callback_ = mark_object_callback;
-    soft_reference_args.arg_ = arg;
-    // References with a marked referent are removed from the list.
-    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
-                                                     &soft_reference_args);
-    process_mark_stack_callback(arg);
-  }
-}
-
-// Process reference class instances and schedule finalizations.
-void Heap::ProcessReferences(TimingLogger& timings, bool clear_soft,
-                             IsMarkedCallback* is_marked_callback,
-                             MarkObjectCallback* mark_object_callback,
-                             ProcessMarkStackCallback* process_mark_stack_callback, void* arg) {
-  timings.StartSplit("(Paused)ProcessReferences");
-  ProcessSoftReferences(timings, clear_soft, is_marked_callback, mark_object_callback,
-                        process_mark_stack_callback, arg);
-  // Clear all remaining soft and weak references with white referents.
-  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  timings.EndSplit();
-  // Preserve all white objects with finalize methods and schedule them for finalization.
-  timings.StartSplit("(Paused)EnqueueFinalizerReferences");
-  finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
-                                                        mark_object_callback, arg);
-  process_mark_stack_callback(arg);
-  timings.EndSplit();
-  timings.StartSplit("(Paused)ProcessReferences");
-  // Clear all f-reachable soft and weak references with white referents.
-  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  // Clear all phantom references with white referents.
-  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
-  // At this point all reference queues other than the cleared references should be empty.
-  DCHECK(soft_reference_queue_.IsEmpty());
-  DCHECK(weak_reference_queue_.IsEmpty());
-  DCHECK(finalizer_reference_queue_.IsEmpty());
-  DCHECK(phantom_reference_queue_.IsEmpty());
-  timings.EndSplit();
-}
-
-// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
-// marked, put it on the appropriate list in the heap for later processing.
-void Heap::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
-                                  IsMarkedCallback is_marked_callback, void* arg) {
-  // klass can be the class of the old object if the visitor already updated the class of ref.
-  DCHECK(klass->IsReferenceClass());
-  mirror::Object* referent = ref->GetReferent();
-  if (referent != nullptr) {
-    mirror::Object* forward_address = is_marked_callback(referent, arg);
-    // Null means that the object is not currently marked.
-    if (forward_address == nullptr) {
-      Thread* self = Thread::Current();
-      // TODO: Remove these locks, and use atomic stacks for storing references?
-      // We need to check that the references haven't already been enqueued since we can end up
-      // scanning the same reference multiple times due to dirty cards.
-      if (klass->IsSoftReferenceClass()) {
-        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
-      } else if (klass->IsWeakReferenceClass()) {
-        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
-      } else if (klass->IsFinalizerReferenceClass()) {
-        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
-      } else if (klass->IsPhantomReferenceClass()) {
-        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
-      } else {
-        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
-                   << klass->GetAccessFlags();
-      }
-    } else if (referent != forward_address) {
-      // Referent is already marked and we need to update it.
-      ref->SetReferent<false>(forward_address);
-    }
-  }
-}
-
 space::ImageSpace* Heap::GetImageSpace() const {
   for (const auto& space : continuous_spaces_) {
     if (space->IsImageSpace()) {
@@ -1477,7 +1382,7 @@
   ChangeCollector(collector_type);
   tl->ResumeAll();
   // Can't call into java code with all threads suspended.
-  EnqueueClearedReferences();
+  reference_processor_.EnqueueClearedReferences();
   uint64_t duration = NanoTime() - start_time;
   GrowForUtilization(semi_space_collector_);
   FinishGC(self, collector::kGcTypeFull);
@@ -1881,7 +1786,7 @@
   total_bytes_freed_ever_ += collector->GetFreedBytes();
   RequestHeapTrim();
   // Enqueue cleared references.
-  EnqueueClearedReferences();
+  reference_processor_.EnqueueClearedReferences();
   // Grow the heap so that we know when to perform the next GC.
   GrowForUtilization(collector);
   const size_t duration = collector->GetDurationNs();
@@ -1952,9 +1857,9 @@
 // Verify a reference from an object.
 class VerifyReferenceVisitor {
  public:
-  explicit VerifyReferenceVisitor(Heap* heap)
+  explicit VerifyReferenceVisitor(Heap* heap, bool verify_referent)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
-      : heap_(heap), failed_(false) {}
+      : heap_(heap), failed_(false), verify_referent_(verify_referent) {}
 
   bool Failed() const {
     return failed_;
@@ -1962,7 +1867,9 @@
 
   void operator()(mirror::Class* klass, mirror::Reference* ref) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+    if (verify_referent_) {
+      this->operator()(ref, mirror::Reference::ReferentOffset(), false);
+    }
   }
 
   void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
@@ -2079,18 +1986,21 @@
  private:
   Heap* const heap_;
   mutable bool failed_;
+  bool verify_referent_;
 };
 
 // Verify all references within an object, for use with HeapBitmap::Visit.
 class VerifyObjectVisitor {
  public:
-  explicit VerifyObjectVisitor(Heap* heap) : heap_(heap), failed_(false) {}
+  explicit VerifyObjectVisitor(Heap* heap, bool verify_referent)
+      : heap_(heap), failed_(false), verify_referent_(verify_referent) {
+  }
 
   void operator()(mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // Note: we are verifying the references in obj but not obj itself, this is because obj must
     // be live or else how did we find it in the live bitmap?
-    VerifyReferenceVisitor visitor(heap_);
+    VerifyReferenceVisitor visitor(heap_, verify_referent_);
     // The class doesn't count as a reference but we should verify it anyways.
     obj->VisitReferences<true>(visitor, visitor);
     failed_ = failed_ || visitor.Failed();
@@ -2109,10 +2019,11 @@
  private:
   Heap* const heap_;
   mutable bool failed_;
+  const bool verify_referent_;
 };
 
 // Must do this with mutators suspended since we are directly accessing the allocation stacks.
-bool Heap::VerifyHeapReferences() {
+bool Heap::VerifyHeapReferences(bool verify_referents) {
   Thread* self = Thread::Current();
   Locks::mutator_lock_->AssertExclusiveHeld(self);
   // Lets sort our allocation stacks so that we can efficiently binary search them.
@@ -2121,7 +2032,7 @@
   // Since we sorted the allocation stack content, need to revoke all
   // thread-local allocation stacks.
   RevokeAllThreadLocalAllocationStacks(self);
-  VerifyObjectVisitor visitor(this);
+  VerifyObjectVisitor visitor(this, verify_referents);
   // Verify objects in the allocation stack since these will be objects which were:
   // 1. Allocated prior to the GC (pre GC verification).
   // 2. Allocated during the GC (pre sweep GC verification).
@@ -2399,7 +2310,9 @@
     // Swapping bound bitmaps does nothing.
     gc->SwapBitmaps();
     SwapSemiSpaces();
-    if (!VerifyHeapReferences()) {
+    // Pass in false since concurrent reference processing can mean that reference referents
+    // may point to dead objects by the time PreSweepingGcVerification is called.
+    if (!VerifyHeapReferences(false)) {
       LOG(FATAL) << "Pre sweeping " << gc->GetName() << " GC verification failed";
     }
     SwapSemiSpaces();
@@ -2622,27 +2535,10 @@
   *object = soa.Decode<mirror::Object*>(arg.get());
 }
 
-void Heap::EnqueueClearedReferences() {
-  Thread* self = Thread::Current();
-  Locks::mutator_lock_->AssertNotHeld(self);
-  if (!cleared_references_.IsEmpty()) {
-    // When a runtime isn't started there are no reference queues to care about so ignore.
-    if (LIKELY(Runtime::Current()->IsStarted())) {
-      ScopedObjectAccess soa(self);
-      ScopedLocalRef<jobject> arg(self->GetJniEnv(),
-                                  soa.AddLocalReference<jobject>(cleared_references_.GetList()));
-      jvalue args[1];
-      args[0].l = arg.get();
-      InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
-    }
-    cleared_references_.Clear();
-  }
-}
-
 void Heap::RequestConcurrentGC(Thread* self) {
   // Make sure that we can do a concurrent GC.
   Runtime* runtime = Runtime::Current();
-  if (runtime == NULL || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
+  if (runtime == nullptr || !runtime->IsFinishedStarting() || runtime->IsShuttingDown(self) ||
       self->IsHandlingStackOverflow()) {
     return;
   }
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7a9ef1e..f71de1a 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -35,7 +35,7 @@
 #include "jni.h"
 #include "object_callbacks.h"
 #include "offsets.h"
-#include "reference_queue.h"
+#include "reference_processor.h"
 #include "safe_map.h"
 #include "thread_pool.h"
 #include "verify_object.h"
@@ -54,6 +54,9 @@
 }  // namespace mirror
 
 namespace gc {
+
+class ReferenceProcessor;
+
 namespace accounting {
   class HeapBitmap;
   class ModUnionTable;
@@ -215,7 +218,7 @@
 
   // Check sanity of all live references.
   void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-  bool VerifyHeapReferences()
+  bool VerifyHeapReferences(bool verify_referents = true)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
   bool VerifyMissingCardMarks()
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -314,21 +317,6 @@
     return discontinuous_spaces_;
   }
 
-  static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
-  void ProcessSoftReferences(TimingLogger& timings, bool clear_soft,
-                             IsMarkedCallback* is_marked_callback,
-                             MarkObjectCallback* mark_object_callback,
-                             ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  void ProcessReferences(TimingLogger& timings, bool clear_soft,
-                         IsMarkedCallback* is_marked_callback,
-                         MarkObjectCallback* mark_object_callback,
-                         ProcessMarkStackCallback* process_mark_stack_callback,
-                         void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
   // Enable verification of object references when the runtime is sufficiently initialized.
   void EnableObjectValidation() {
     verify_object_mode_ = kVerifyObjectSupport;
@@ -565,6 +553,10 @@
   }
   bool HasImageSpace() const;
 
+  ReferenceProcessor* GetReferenceProcessor() {
+    return &reference_processor_;
+  }
+
  private:
   void Compact(space::ContinuousMemMapAllocSpace* target_space,
                space::ContinuousMemMapAllocSpace* source_space)
@@ -631,12 +623,6 @@
   bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void EnqueueClearedReferences();
-  // Returns true if the reference object has not yet been enqueued.
-  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
-                              IsMarkedCallback is_marked_callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   // Run the finalizers.
   void RunFinalization(JNIEnv* env);
 
@@ -797,12 +783,8 @@
   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
 
-  // Reference queues.
-  ReferenceQueue soft_reference_queue_;
-  ReferenceQueue weak_reference_queue_;
-  ReferenceQueue finalizer_reference_queue_;
-  ReferenceQueue phantom_reference_queue_;
-  ReferenceQueue cleared_references_;
+  // Reference processor.
+  ReferenceProcessor reference_processor_;
 
   // True while the garbage collector is running.
   volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc
new file mode 100644
index 0000000..ef9e1d4
--- /dev/null
+++ b/runtime/gc/reference_processor.cc
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "reference_processor.h"
+
+#include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
+#include "reflection.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "well_known_classes.h"
+
+namespace art {
+namespace gc {
+
+ReferenceProcessor::ReferenceProcessor()
+    : process_references_args_(nullptr, nullptr, nullptr), slow_path_enabled_(false),
+      preserving_references_(false), lock_("reference processor lock"),
+      condition_("reference processor condition", lock_) {
+}
+
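+// Called with mutators suspended, which is what allows GetReferent to read slow_path_enabled_
+// on its fast path without holding lock_.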
+void ReferenceProcessor::EnableSlowPath() {
+  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
+  slow_path_enabled_ = true;
+}
+
+void ReferenceProcessor::DisableSlowPath(Thread* self) {
+  slow_path_enabled_ = false;
+  // Set to null so that GetReferent knows not to attempt to use the callback to see if
+  // referents are marked.
+  process_references_args_.is_marked_callback_ = nullptr;
+  condition_.Broadcast(self);
+}
+
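+// Fast path: when the slow path is disabled, read and return the referent with no locking.
+// Slow path: block on condition_ until the GC can prove the referent marked or is done
+// processing references.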
+mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference* reference) {
+  mirror::Object* const referent = reference->GetReferent();
+  if (LIKELY(!slow_path_enabled_)) {
+    return referent;
+  }
+  // Another fast path: if the referent is cleared, we can just return null since there is no
+  // scenario where it becomes non-null again.
+  if (referent == nullptr) {
+    return nullptr;
+  }
+  MutexLock mu(self, lock_);
+  while (slow_path_enabled_) {
+    // Try to see if the referent is already marked by using the is_marked_callback. We can return
+    // it to the mutator as long as the GC is not preserving references. If the GC is
+    // preserving references, the mutator could take a white field and move it somewhere else
+    // in the heap, causing corruption since this field would get swept.
+    IsMarkedCallback* const is_marked_callback = process_references_args_.is_marked_callback_;
+    if (!preserving_references_ && is_marked_callback != nullptr) {
+      mirror::Object* const referent = reference->GetReferent();
+      mirror::Object* const obj = is_marked_callback(referent, process_references_args_.arg_);
+      // If it's null it means not marked, but it could become marked if the referent is reachable
+      // from finalizer referents. So we cannot return in this case and must block.
+      if (obj != nullptr) {
+        return obj;
+      }
+    }
+    condition_.Wait(self);
+  }
+  return reference->GetReferent();
+}
+
+mirror::Object* ReferenceProcessor::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) {
+  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
+  // TODO: Don't preserve all soft references.
+  return args->mark_callback_(obj, args->arg_);
+}
+
+void ReferenceProcessor::StartPreservingReferences(Thread* self) {
+  MutexLock mu(self, lock_);
+  preserving_references_ = true;
+}
+
+void ReferenceProcessor::StopPreservingReferences(Thread* self) {
+  MutexLock mu(self, lock_);
+  preserving_references_ = false;
+  // We are done preserving references; waiters that were blocked may now see a marked referent.
+  condition_.Broadcast(self);
+}
+
+// Process reference class instances and schedule finalizations.
+void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
+                                           bool clear_soft_references,
+                                           IsMarkedCallback* is_marked_callback,
+                                           MarkObjectCallback* mark_object_callback,
+                                           ProcessMarkStackCallback* process_mark_stack_callback,
+                                           void* arg) {
+  Thread* self = Thread::Current();
+  {
+    MutexLock mu(self, lock_);
+    process_references_args_.is_marked_callback_ = is_marked_callback;
+    process_references_args_.mark_callback_ = mark_object_callback;
+    process_references_args_.arg_ = arg;
+  }
+  if (concurrent) {
+    MutexLock mu(self, lock_);
+    CHECK(slow_path_enabled_) << "Slow path must be enabled for concurrent reference processing";
+    timings->StartSplit("ProcessReferences");
+  } else {
+    timings->StartSplit("(Paused)ProcessReferences");
+  }
+  // Unless required to clear soft references with white referents, preserve some white referents.
+  if (!clear_soft_references) {
+    TimingLogger::ScopedSplit split(concurrent ? "PreserveSomeSoftReferences" :
+        "(Paused)PreserveSomeSoftReferences", timings);
+    if (concurrent) {
+      StartPreservingReferences(self);
+    }
+    // References with a marked referent are removed from the list.
+    soft_reference_queue_.PreserveSomeSoftReferences(&PreserveSoftReferenceCallback,
+                                                     &process_references_args_);
+
+    process_mark_stack_callback(arg);
+    if (concurrent) {
+      StopPreservingReferences(self);
+    }
+  }
+  // Clear all remaining soft and weak references with white referents.
+  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  {
+    TimingLogger::ScopedSplit split(concurrent ? "EnqueueFinalizerReferences" :
+        "(Paused)EnqueueFinalizerReferences", timings);
+    if (concurrent) {
+      StartPreservingReferences(self);
+    }
+    // Preserve all white objects with finalize methods and schedule them for finalization.
+    finalizer_reference_queue_.EnqueueFinalizerReferences(cleared_references_, is_marked_callback,
+                                                          mark_object_callback, arg);
+    process_mark_stack_callback(arg);
+    if (concurrent) {
+      StopPreservingReferences(self);
+    }
+  }
+  // Clear all finalizer-referent-reachable soft and weak references with white referents.
+  soft_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  weak_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  // Clear all phantom references with white referents.
+  phantom_reference_queue_.ClearWhiteReferences(cleared_references_, is_marked_callback, arg);
+  // At this point all reference queues other than the cleared references should be empty.
+  DCHECK(soft_reference_queue_.IsEmpty());
+  DCHECK(weak_reference_queue_.IsEmpty());
+  DCHECK(finalizer_reference_queue_.IsEmpty());
+  DCHECK(phantom_reference_queue_.IsEmpty());
+  if (concurrent) {
+    MutexLock mu(self, lock_);
+    // Done processing, disable the slow path and broadcast to the waiters.
+    DisableSlowPath(self);
+  }
+  timings->EndSplit();
+}
+
+// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+                                                IsMarkedCallback is_marked_callback, void* arg) {
+  // klass can be the class of the old object if the visitor already updated the class of ref.
+  DCHECK(klass->IsReferenceClass());
+  mirror::Object* referent = ref->GetReferent();
+  if (referent != nullptr) {
+    mirror::Object* forward_address = is_marked_callback(referent, arg);
+    // Null means that the object is not currently marked.
+    if (forward_address == nullptr) {
+      Thread* self = Thread::Current();
+      // TODO: Remove these locks, and use atomic stacks for storing references?
+      // We need to check that the references haven't already been enqueued since we can end up
+      // scanning the same reference multiple times due to dirty cards.
+      if (klass->IsSoftReferenceClass()) {
+        soft_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+      } else if (klass->IsWeakReferenceClass()) {
+        weak_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+      } else if (klass->IsFinalizerReferenceClass()) {
+        finalizer_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+      } else if (klass->IsPhantomReferenceClass()) {
+        phantom_reference_queue_.AtomicEnqueueIfNotEnqueued(self, ref);
+      } else {
+        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+                   << klass->GetAccessFlags();
+      }
+    } else if (referent != forward_address) {
+      // Referent is already marked and we need to update it.
+      ref->SetReferent<false>(forward_address);
+    }
+  }
+}
+
+void ReferenceProcessor::EnqueueClearedReferences() {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertNotHeld(self);
+  if (!cleared_references_.IsEmpty()) {
+    // When a runtime isn't started there are no reference queues to care about so ignore.
+    if (LIKELY(Runtime::Current()->IsStarted())) {
+      ScopedObjectAccess soa(self);
+      ScopedLocalRef<jobject> arg(self->GetJniEnv(),
+                                  soa.AddLocalReference<jobject>(cleared_references_.GetList()));
+      jvalue args[1];
+      args[0].l = arg.get();
+      InvokeWithJValues(soa, nullptr, WellKnownClasses::java_lang_ref_ReferenceQueue_add, args);
+    }
+    cleared_references_.Clear();
+  }
+}
+
+}  // namespace gc
+}  // namespace art
+
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
new file mode 100644
index 0000000..f082a9e
--- /dev/null
+++ b/runtime/gc/reference_processor.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
+#define ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
+
+#include "base/mutex.h"
+#include "globals.h"
+#include "jni.h"
+#include "object_callbacks.h"
+#include "reference_queue.h"
+
+namespace art {
+
+class TimingLogger;
+
+namespace mirror {
+class Object;
+class Reference;
+}  // namespace mirror
+
+namespace gc {
+
+class Heap;
+
+// Used to process java.lang.ref.Reference instances concurrently or paused.
+class ReferenceProcessor {
+ public:
+  explicit ReferenceProcessor();
+  static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
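+  // Processes the soft, weak, finalizer, and phantom reference queues. When concurrent is true,
+  // mutators keep running and a Reference.get() call may block in GetReferent until processing
+  // completes; when false, processing happens inside a GC pause.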
+  void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
+                         IsMarkedCallback* is_marked_callback,
+                         MarkObjectCallback* mark_object_callback,
+                         ProcessMarkStackCallback* process_mark_stack_callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      LOCKS_EXCLUDED(lock_);
+  // Only allow setting this with mutators suspended so that we can avoid using a lock in the
+  // GetReferent fast path as an optimization.
+  void EnableSlowPath() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Decode the referent; may block if references are being processed.
+  mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(lock_);
+  void EnqueueClearedReferences() LOCKS_EXCLUDED(Locks::mutator_lock_);
+  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
+                              IsMarkedCallback is_marked_callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ private:
+  class ProcessReferencesArgs {
+   public:
+    ProcessReferencesArgs(IsMarkedCallback* is_marked_callback,
+                          MarkObjectCallback* mark_callback, void* arg)
+        : is_marked_callback_(is_marked_callback), mark_callback_(mark_callback), arg_(arg) {
+    }
+
+    // is_marked_callback_ is null when the args aren't set up.
+    IsMarkedCallback* is_marked_callback_;
+    MarkObjectCallback* mark_callback_;
+    void* arg_;
+  };
+  // Called by ProcessReferences.
+  void DisableSlowPath(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  // If we are preserving references it means that some dead objects may become live; we use start
+  // and stop preserving to block mutators using GetReferent from getting access to these
+  // referents.
+  void StartPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
+  void StopPreservingReferences(Thread* self) LOCKS_EXCLUDED(lock_);
+  // Process args, used by GetReferent to return referents which are already marked.
+  ProcessReferencesArgs process_references_args_ GUARDED_BY(lock_);
+  // Boolean for whether or not we need to go down the slow path in GetReferent.
+  volatile bool slow_path_enabled_;
+  // Boolean for whether or not we are preserving references (either soft references or finalizers).
+  // If this is true, then we cannot return a referent (see comment in GetReferent).
+  bool preserving_references_ GUARDED_BY(lock_);
+  // Lock that guards the reference processing.
+  Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Condition that mutators wait on if they attempt to get the referent of a reference while
+  // processing is in progress.
+  ConditionVariable condition_ GUARDED_BY(lock_);
+  // Reference queues used by the GC.
+  ReferenceQueue soft_reference_queue_;
+  ReferenceQueue weak_reference_queue_;
+  ReferenceQueue finalizer_reference_queue_;
+  ReferenceQueue phantom_reference_queue_;
+  ReferenceQueue cleared_references_;
+};
+
+}  // namespace gc
+}  // namespace art
+
+#endif  // ART_RUNTIME_GC_REFERENCE_PROCESSOR_H_
diff --git a/runtime/gc/reference_queue.cc b/runtime/gc/reference_queue.cc
index aee7891..d2bd9a4 100644
--- a/runtime/gc/reference_queue.cc
+++ b/runtime/gc/reference_queue.cc
@@ -131,8 +131,8 @@
 }
 
 void ReferenceQueue::EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
-                                                IsMarkedCallback is_marked_callback,
-                                                MarkObjectCallback recursive_mark_callback,
+                                                IsMarkedCallback* is_marked_callback,
+                                                MarkObjectCallback* mark_object_callback,
                                                 void* arg) {
   while (!IsEmpty()) {
     mirror::FinalizerReference* ref = DequeuePendingReference()->AsFinalizerReference();
@@ -141,7 +141,7 @@
       mirror::Object* forward_address = is_marked_callback(referent, arg);
       // If the referent isn't marked, mark it now to preserve it for finalization.
       if (forward_address == nullptr) {
-        forward_address = recursive_mark_callback(referent, arg);
+        forward_address = mark_object_callback(referent, arg);
         // If the referent is non-null the reference must be queuable.
         DCHECK(ref->IsEnqueuable());
         // Move the updated referent to the zombie field.
@@ -160,7 +160,7 @@
   }
 }
 
-void ReferenceQueue::PreserveSomeSoftReferences(IsMarkedCallback preserve_callback, void* arg) {
+void ReferenceQueue::PreserveSomeSoftReferences(IsMarkedCallback* preserve_callback, void* arg) {
   ReferenceQueue cleared;
   while (!IsEmpty()) {
     mirror::Reference* ref = DequeuePendingReference();
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 8d392ba..4f223e2 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -59,8 +59,8 @@
   // Enqueues finalizer references with white referents.  White referents are blackened, moved to the
   // zombie field, and the referent field is cleared.
   void EnqueueFinalizerReferences(ReferenceQueue& cleared_references,
-                                  IsMarkedCallback is_marked_callback,
-                                  MarkObjectCallback recursive_mark_callback, void* arg)
+                                  IsMarkedCallback* is_marked_callback,
+                                  MarkObjectCallback* mark_object_callback, void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Walks the reference list marking any references subject to the reference clearing policy.
   // References with a black referent are removed from the list.  References with white referents
@@ -69,7 +69,8 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   // Unlink the reference list clearing references objects with white referents.  Cleared references
   // registered to a reference queue are scheduled for appending by the heap worker thread.
-  void ClearWhiteReferences(ReferenceQueue& cleared_references, IsMarkedCallback is_marked_callback,
+  void ClearWhiteReferences(ReferenceQueue& cleared_references,
+                            IsMarkedCallback* is_marked_callback,
                             void* arg)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void Dump(std::ostream& os) const
diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc
new file mode 100644
index 0000000..f221ac6
--- /dev/null
+++ b/runtime/native/java_lang_ref_Reference.cc
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "gc/heap.h"
+#include "gc/reference_processor.h"
+#include "jni_internal.h"
+#include "mirror/object-inl.h"
+#include "mirror/reference-inl.h"
+#include "scoped_fast_native_object_access.h"
+
+namespace art {
+
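+// Native implementation of java.lang.ref.Reference.get(). Routes through the heap's
+// ReferenceProcessor so that a referent is only returned when it is safe to do so.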
+static jobject Reference_get(JNIEnv* env, jobject javaThis) {
+  ScopedFastNativeObjectAccess soa(env);
+  mirror::Reference* const ref = soa.Decode<mirror::Reference*>(javaThis);
+  mirror::Object* const referent =
+      Runtime::Current()->GetHeap()->GetReferenceProcessor()->GetReferent(soa.Self(), ref);
+  return soa.AddLocalReference<jobject>(referent);
+}
+
+static JNINativeMethod gMethods[] = {
+  NATIVE_METHOD(Reference, get, "!()Ljava/lang/Object;"),
+};
+
+void register_java_lang_ref_Reference(JNIEnv* env) {
+  REGISTER_NATIVE_METHODS("java/lang/ref/Reference");
+}
+
+}  // namespace art
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index cbd51d4..6d9dfa6 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -744,6 +744,7 @@
   REGISTER(register_java_lang_System);
   REGISTER(register_java_lang_Thread);
   REGISTER(register_java_lang_VMClassLoader);
+  REGISTER(register_java_lang_ref_Reference);
   REGISTER(register_java_lang_reflect_Array);
   REGISTER(register_java_lang_reflect_Constructor);
   REGISTER(register_java_lang_reflect_Field);