Add hash set

More memory efficient than the libc++ unordered containers since we do not
box the values.

Change intern table to use new hash set. Clean up intern table by
removing const casts and deleting unnecessary code.

Change the class linker to use the new hash set, and add a pre-zygote
class table.

5 samples of:
adb shell stop && adb shell start && sleep 60 && adb shell dumpsys meminfo
Before:
165929 kB: Native
175859 kB: Native
168434 kB: Native
166559 kB: Native
169958 kB: Native

After:
160972 kB: Native
159439 kB: Native
157204 kB: Native
165093 kB: Native
163039 kB: Native

TODO: Add a HashTable implemented on top of HashSet.
TODO: Use for DexFile::find_class_def_misses_.
TODO: Investigate using mem maps instead of native heap.

Bug: 17808975

Change-Id: I93e376cf6eb9628cf52f4aefdadb6157acfb799a
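
A minimal usage sketch of the new HashSet (illustrative only; the empty-slot
functor mirrors IsEmptyFnString from hash_set_test.cc, and note that the empty
value itself cannot be stored since it marks free buckets):

  #include <string>
  #include "base/hash_set.h"

  // Treats the empty string as the free-bucket marker.
  class IsEmptyFnString {
   public:
    void MakeEmpty(std::string& item) const { item.clear(); }
    bool IsEmpty(const std::string& item) const { return item.empty(); }
  };

  void Example() {
    art::HashSet<std::string, IsEmptyFnString> set;
    set.Insert("hello");  // Insert allows duplicates.
    auto it = set.Find(std::string("hello"));
    if (it != set.end()) {
      it = set.Erase(it);  // Returns an iterator to the next element.
    }
  }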
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index bf9e9ec..807c5cc 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -72,6 +72,7 @@
   runtime/barrier_test.cc \
   runtime/base/bit_field_test.cc \
   runtime/base/bit_vector_test.cc \
+  runtime/base/hash_set_test.cc \
   runtime/base/hex_dump_test.cc \
   runtime/base/histogram_test.cc \
   runtime/base/mutex_test.cc \
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 815bad3..0b304eb 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -321,7 +321,8 @@
 
   // Remove the undesired classes from the class roots.
   for (const std::string& it : non_image_classes) {
-    class_linker->RemoveClass(it.c_str(), NULL);
+    bool result = class_linker->RemoveClass(it.c_str(), NULL);
+    DCHECK(result);
   }
 
   // Clear references to removed classes from the DexCaches.
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index a7adb02..da5ac29 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -91,7 +91,7 @@
 
 // Tracking allocator, tracks how much memory is used.
 template<class T, AllocatorTag kTag>
-class TrackingAllocatorImpl {
+class TrackingAllocatorImpl : public std::allocator<T> {
  public:
   typedef typename std::allocator<T>::value_type value_type;
   typedef typename std::allocator<T>::size_type size_type;
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
new file mode 100644
index 0000000..992e5b1
--- /dev/null
+++ b/runtime/base/hash_set.h
@@ -0,0 +1,407 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_HASH_SET_H_
+#define ART_RUNTIME_BASE_HASH_SET_H_
+
+#include <functional>
+#include <memory>
+#include <stdint.h>
+#include <utility>
+
+#include "logging.h"
+
+namespace art {
+
+// Default functor for making/checking empty slots: a value-initialized T means empty.
+template <class T>
+class DefaultEmptyFn {
+ public:
+  void MakeEmpty(T& item) const {
+    item = T();
+  }
+  bool IsEmpty(const T& item) const {
+    return item == T();
+  }
+};
+
+template <class T>
+class DefaultEmptyFn<T*> {
+ public:
+  void MakeEmpty(T*& item) const {
+    item = nullptr;
+  }
+  bool IsEmpty(T* const& item) const {
+    return item == nullptr;
+  }
+};
+
+// Low memory version of a hash set, uses less memory than std::unordered_set since elements aren't
+// boxed. Uses linear probing.
+// EmptyFn needs to implement two functions: MakeEmpty(T& item) and IsEmpty(const T& item).
+template <class T, class EmptyFn = DefaultEmptyFn<T>, class HashFn = std::hash<T>,
+    class Pred = std::equal_to<T>, class Alloc = std::allocator<T>>
+class HashSet {
+ public:
+  static constexpr double kDefaultMinLoadFactor = 0.5;
+  static constexpr double kDefaultMaxLoadFactor = 0.9;
+  static constexpr size_t kMinBuckets = 1000;
+
+  class Iterator {
+   public:
+    Iterator(const Iterator&) = default;
+    Iterator(HashSet* hash_set, size_t index) : hash_set_(hash_set), index_(index) {
+    }
+    Iterator& operator=(const Iterator&) = default;
+    bool operator==(const Iterator& other) const {
+      return hash_set_ == other.hash_set_ && index_ == other.index_;
+    }
+    bool operator!=(const Iterator& other) const {
+      return !(*this == other);
+    }
+    Iterator& operator++() {  // Value after modification.
+      index_ = NextNonEmptySlot(index_);
+      return *this;
+    }
+    Iterator operator++(int) {
+      Iterator temp = *this;
+      index_ = NextNonEmptySlot(index_);
+      return temp;
+    }
+    T& operator*() {
+      DCHECK(!hash_set_->IsFreeSlot(GetIndex()));
+      return hash_set_->ElementForIndex(index_);
+    }
+    const T& operator*() const {
+      DCHECK(!hash_set_->IsFreeSlot(GetIndex()));
+      return hash_set_->ElementForIndex(index_);
+    }
+    T* operator->() {
+      return &**this;
+    }
+    const T* operator->() const {
+      return &**this;
+    }
+    // TODO: Operator -- --(int)
+
+   private:
+    HashSet* hash_set_;
+    size_t index_;
+
+    size_t GetIndex() const {
+      return index_;
+    }
+    size_t NextNonEmptySlot(size_t index) const {
+      const size_t num_buckets = hash_set_->NumBuckets();
+      DCHECK_LT(index, num_buckets);
+      do {
+        ++index;
+      } while (index < num_buckets && hash_set_->IsFreeSlot(index));
+      return index;
+    }
+
+    friend class HashSet;
+  };
+
+  void Clear() {
+    DeallocateStorage();
+    AllocateStorage(1);
+    num_elements_ = 0;
+    elements_until_expand_ = 0;
+  }
+  HashSet() : num_elements_(0), num_buckets_(0), data_(nullptr),
+      min_load_factor_(kDefaultMinLoadFactor), max_load_factor_(kDefaultMaxLoadFactor) {
+    Clear();
+  }
+  HashSet(const HashSet& other) : num_elements_(0), num_buckets_(0), data_(nullptr),
+      elements_until_expand_(0), min_load_factor_(kDefaultMinLoadFactor),
+      max_load_factor_(kDefaultMaxLoadFactor) {
+    *this = other;
+  }
+  HashSet(HashSet&& other) : num_elements_(0), num_buckets_(0), data_(nullptr),
+      elements_until_expand_(0), min_load_factor_(kDefaultMinLoadFactor),
+      max_load_factor_(kDefaultMaxLoadFactor) {
+    *this = std::move(other);
+  }
+  ~HashSet() {
+    DeallocateStorage();
+  }
+  HashSet& operator=(HashSet&& other) {
+    std::swap(data_, other.data_);
+    std::swap(num_buckets_, other.num_buckets_);
+    std::swap(num_elements_, other.num_elements_);
+    std::swap(elements_until_expand_, other.elements_until_expand_);
+    std::swap(min_load_factor_, other.min_load_factor_);
+    std::swap(max_load_factor_, other.max_load_factor_);
+    return *this;
+  }
+  HashSet& operator=(const HashSet& other) {
+    DeallocateStorage();
+    AllocateStorage(other.NumBuckets());
+    for (size_t i = 0; i < num_buckets_; ++i) {
+      ElementForIndex(i) = other.data_[i];
+    }
+    num_elements_ = other.num_elements_;
+    elements_until_expand_ = other.elements_until_expand_;
+    min_load_factor_ = other.min_load_factor_;
+    max_load_factor_ = other.max_load_factor_;
+    return *this;
+  }
+  // Lower case for C++11 range-based for loops.
+  Iterator begin() {
+    Iterator ret(this, 0);
+    if (num_buckets_ != 0 && IsFreeSlot(ret.GetIndex())) {
+      ++ret;  // Skip all the empty slots.
+    }
+    return ret;
+  }
+  // Lower case for C++11 range-based for loops.
+  Iterator end() {
+    return Iterator(this, NumBuckets());
+  }
+  bool Empty() {
+    return begin() == end();
+  }
+  // Erase algorithm:
+  // Make an empty slot where the iterator is pointing.
+  // Scan forwards until we hit another empty slot.
+  // If an element in between doesn't rehash to the range from the current empty slot to the
+  // iterator, it must have been probed from before the empty slot; in that case we can move it
+  // to the empty slot and make the slot we just moved from the new empty slot.
+  // Relies on maintaining the invariant that there are no empty slots from the 'ideal' index
+  // of an element to its actual location/index.
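+  //
+  // Worked example (hypothetical layout): suppose elements A and B both hash to slot 1 and
+  // occupy slots 1 and 2. Erasing A empties slot 1; the scan finds B, whose ideal slot (1) is
+  // at or before the hole, so B moves into slot 1 and slot 2 becomes the new empty slot. The
+  // next slot is free, so the scan terminates there.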
+  Iterator Erase(Iterator it) {
+    // empty_index is the index that will become empty.
+    size_t empty_index = it.GetIndex();
+    DCHECK(!IsFreeSlot(empty_index));
+    size_t next_index = empty_index;
+    bool filled = false;  // True if we filled the empty index.
+    while (true) {
+      next_index = NextIndex(next_index);
+      T& next_element = ElementForIndex(next_index);
+      // If the next element is empty, we are done. Make sure to clear the current empty index.
+      if (emptyfn_.IsEmpty(next_element)) {
+        emptyfn_.MakeEmpty(ElementForIndex(empty_index));
+        break;
+      }
+      // Otherwise try to see if the next element can fill the current empty index.
+      const size_t next_hash = hashfn_(next_element);
+      // Calculate the ideal index, if it is within empty_index + 1 to next_index then there is
+      // nothing we can do.
+      size_t next_ideal_index = IndexForHash(next_hash);
+      // Loop around if needed for our check.
+      size_t unwrapped_next_index = next_index;
+      if (unwrapped_next_index < empty_index) {
+        unwrapped_next_index += NumBuckets();
+      }
+      // Loop around if needed for our check.
+      size_t unwrapped_next_ideal_index = next_ideal_index;
+      if (unwrapped_next_ideal_index < empty_index) {
+        unwrapped_next_ideal_index += NumBuckets();
+      }
+      if (unwrapped_next_ideal_index <= empty_index ||
+          unwrapped_next_ideal_index > unwrapped_next_index) {
+        // If the target index isn't within our current range it must have been probed from before
+        // the empty index.
+        ElementForIndex(empty_index) = std::move(next_element);
+        filled = true;  // TODO: Optimize
+        empty_index = next_index;
+      }
+    }
+    --num_elements_;
+    // If we didn't fill the slot then we need to go to the next non-free slot.
+    if (!filled) {
+      ++it;
+    }
+    return it;
+  }
+  // Find an element, returns end() if not found.
+  // Allows custom K types. Example of when this is useful: a set of Class* hashed by name,
+  // where we want to look up a class by name without having to allocate a dummy Class object
+  // on the heap just for the lookup.
+  template <typename K>
+  Iterator Find(const K& element) {
+    return FindWithHash(element, hashfn_(element));
+  }
+  template <typename K>
+  Iterator FindWithHash(const K& element, size_t hash) {
+    DCHECK_EQ(hashfn_(element), hash);
+    size_t index = IndexForHash(hash);
+    while (true) {
+      T& slot = ElementForIndex(index);
+      if (emptyfn_.IsEmpty(slot)) {
+        return end();
+      }
+      if (pred_(slot, element)) {
+        return Iterator(this, index);
+      }
+      index = NextIndex(index);
+    }
+  }
+  // Insert an element, allows duplicates.
+  void Insert(const T& element) {
+    InsertWithHash(element, hashfn_(element));
+  }
+  void InsertWithHash(const T& element, size_t hash) {
+    DCHECK_EQ(hash, hashfn_(element));
+    if (num_elements_ >= elements_until_expand_) {
+      Expand();
+      DCHECK_LT(num_elements_, elements_until_expand_);
+    }
+    const size_t index = FirstAvailableSlot(IndexForHash(hash));
+    data_[index] = element;
+    ++num_elements_;
+  }
+  size_t Size() const {
+    return num_elements_;
+  }
+  void ShrinkToMaximumLoad() {
+    Resize(Size() / max_load_factor_);
+  }
+  // Total distance that inserted elements were probed. Used for measuring how good hash
+  // functions are.
+  size_t TotalProbeDistance() const {
+    size_t total = 0;
+    for (size_t i = 0; i < NumBuckets(); ++i) {
+      const T& element = ElementForIndex(i);
+      if (!emptyfn_.IsEmpty(element)) {
+        size_t ideal_location = IndexForHash(hashfn_(element));
+        if (ideal_location > i) {
+          total += i + NumBuckets() - ideal_location;
+        } else {
+          total += i - ideal_location;
+        }
+      }
+    }
+    return total;
+  }
+  // Calculate the current load factor and return it.
+  double CalculateLoadFactor() const {
+    return static_cast<double>(Size()) / static_cast<double>(NumBuckets());
+  }
+  // Make sure that everything reinserts in the right spot. Returns the number of errors.
+  size_t Verify() {
+    size_t errors = 0;
+    for (size_t i = 0; i < num_buckets_; ++i) {
+      T& element = data_[i];
+      if (!emptyfn_.IsEmpty(element)) {
+        T temp;
+        emptyfn_.MakeEmpty(temp);
+        std::swap(temp, element);
+        size_t first_slot = FirstAvailableSlot(IndexForHash(hashfn_(temp)));
+        if (i != first_slot) {
+          LOG(ERROR) << "Element " << i << " should be in slot " << first_slot;
+          ++errors;
+        }
+        std::swap(temp, element);
+      }
+    }
+    return errors;
+  }
+
+ private:
+  T& ElementForIndex(size_t index) {
+    DCHECK_LT(index, NumBuckets());
+    DCHECK(data_ != nullptr);
+    return data_[index];
+  }
+  const T& ElementForIndex(size_t index) const {
+    DCHECK_LT(index, NumBuckets());
+    DCHECK(data_ != nullptr);
+    return data_[index];
+  }
+  size_t IndexForHash(size_t hash) const {
+    return hash % num_buckets_;
+  }
+  size_t NextIndex(size_t index) const {
+    if (UNLIKELY(++index >= num_buckets_)) {
+      DCHECK_EQ(index, NumBuckets());
+      return 0;
+    }
+    return index;
+  }
+  bool IsFreeSlot(size_t index) const {
+    return emptyfn_.IsEmpty(ElementForIndex(index));
+  }
+  size_t NumBuckets() const {
+    return num_buckets_;
+  }
+  // Allocate a number of buckets.
+  void AllocateStorage(size_t num_buckets) {
+    num_buckets_ = num_buckets;
+    data_ = allocfn_.allocate(num_buckets_);
+    for (size_t i = 0; i < num_buckets_; ++i) {
+      allocfn_.construct(allocfn_.address(data_[i]));
+      emptyfn_.MakeEmpty(data_[i]);
+    }
+  }
+  void DeallocateStorage() {
+    if (num_buckets_ != 0) {
+      for (size_t i = 0; i < NumBuckets(); ++i) {
+        allocfn_.destroy(allocfn_.address(data_[i]));
+      }
+      allocfn_.deallocate(data_, NumBuckets());
+      data_ = nullptr;
+      num_buckets_ = 0;
+    }
+  }
+  // Expand the set based on the load factors.
+  void Expand() {
+    size_t min_buckets = static_cast<size_t>(Size() / min_load_factor_);
+    if (min_buckets < kMinBuckets) {
+      min_buckets = kMinBuckets;
+    }
+    // Resize based on the minimum load factor.
+    Resize(min_buckets);
+    // When we hit elements_until_expand_, we are at the max load factor and must expand again.
+    elements_until_expand_ = NumBuckets() * max_load_factor_;
+  }
+  // Expand / shrink the table to the new specified size.
+  void Resize(size_t new_size) {
+    DCHECK_GE(new_size, Size());
+    T* old_data = data_;
+    size_t old_num_buckets = num_buckets_;
+    // Reinsert all of the old elements.
+    AllocateStorage(new_size);
+    for (size_t i = 0; i < old_num_buckets; ++i) {
+      T& element = old_data[i];
+      if (!emptyfn_.IsEmpty(element)) {
+        data_[FirstAvailableSlot(IndexForHash(hashfn_(element)))] = std::move(element);
+      }
+      allocfn_.destroy(allocfn_.address(element));
+    }
+    allocfn_.deallocate(old_data, old_num_buckets);
+  }
+  ALWAYS_INLINE size_t FirstAvailableSlot(size_t index) const {
+    while (!emptyfn_.IsEmpty(data_[index])) {
+      index = NextIndex(index);
+    }
+    return index;
+  }
+
+  Alloc allocfn_;  // Allocator function.
+  HashFn hashfn_;  // Hashing function.
+  EmptyFn emptyfn_;  // IsEmpty/SetEmpty function.
+  Pred pred_;  // Equals function.
+  size_t num_elements_;  // Number of inserted elements.
+  size_t num_buckets_;  // Number of hash table buckets.
+  size_t elements_until_expand_;  // Maximum number of elements before we expand the table.
+  T* data_;  // Backing storage.
+  double min_load_factor_;
+  double max_load_factor_;
+
+  friend class Iterator;
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_HASH_SET_H_
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
new file mode 100644
index 0000000..ee2d0b3
--- /dev/null
+++ b/runtime/base/hash_set_test.cc
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "hash_set.h"
+
+#include <map>
+#include <sstream>
+#include <string>
+#include <unordered_set>
+
+#include "common_runtime_test.h"
+#include "utils.h"
+
+namespace art {
+
+class IsEmptyFnString {
+ public:
+  void MakeEmpty(std::string& item) const {
+    item.clear();
+  }
+  bool IsEmpty(const std::string& item) const {
+    return item.empty();
+  }
+};
+
+class HashSetTest : public CommonRuntimeTest {
+ public:
+  HashSetTest() : seed_(97421), unique_number_(0) {
+  }
+  std::string RandomString(size_t len) {
+    std::ostringstream oss;
+    for (size_t i = 0; i < len; ++i) {
+      oss << static_cast<char>('A' + PRand() % 64);
+    }
+    COMPILE_ASSERT(' ' < 'A', must_be_less_than_a);
+    oss << " " << unique_number_++;  // Relies on ' ' < 'A'
+    return oss.str();
+  }
+  void SetSeed(size_t seed) {
+    seed_ = seed;
+  }
+  size_t PRand() {  // Pseudo random.
+    seed_ = seed_ * 1103515245 + 12345;
+    return seed_;
+  }
+
+ private:
+  size_t seed_;
+  size_t unique_number_;
+};
+
+TEST_F(HashSetTest, TestSmoke) {
+  HashSet<std::string, IsEmptyFnString> hash_set;
+  const std::string test_string = "hello world 1234";
+  ASSERT_TRUE(hash_set.Empty());
+  ASSERT_EQ(hash_set.Size(), 0U);
+  hash_set.Insert(test_string);
+  auto it = hash_set.Find(test_string);
+  ASSERT_EQ(*it, test_string);
+  auto after_it = hash_set.Erase(it);
+  ASSERT_TRUE(after_it == hash_set.end());
+  ASSERT_TRUE(hash_set.Empty());
+  ASSERT_EQ(hash_set.Size(), 0U);
+  it = hash_set.Find(test_string);
+  ASSERT_TRUE(it == hash_set.end());
+}
+
+TEST_F(HashSetTest, TestInsertAndErase) {
+  HashSet<std::string, IsEmptyFnString> hash_set;
+  static constexpr size_t count = 1000;
+  std::vector<std::string> strings;
+  for (size_t i = 0; i < count; ++i) {
+    // Insert a bunch of elements and make sure we can find them.
+    strings.push_back(RandomString(10));
+    hash_set.Insert(strings[i]);
+    auto it = hash_set.Find(strings[i]);
+    ASSERT_TRUE(it != hash_set.end());
+    ASSERT_EQ(*it, strings[i]);
+  }
+  ASSERT_EQ(strings.size(), hash_set.Size());
+  // Try to erase the odd strings.
+  for (size_t i = 1; i < count; i += 2) {
+    auto it = hash_set.Find(strings[i]);
+    ASSERT_TRUE(it != hash_set.end());
+    ASSERT_EQ(*it, strings[i]);
+    hash_set.Erase(it);
+  }
+  // Test removed.
+  for (size_t i = 1; i < count; i += 2) {
+    auto it = hash_set.Find(strings[i]);
+    ASSERT_TRUE(it == hash_set.end());
+  }
+  for (size_t i = 0; i < count; i += 2) {
+    auto it = hash_set.Find(strings[i]);
+    ASSERT_TRUE(it != hash_set.end());
+    ASSERT_EQ(*it, strings[i]);
+  }
+}
+
+TEST_F(HashSetTest, TestIterator) {
+  HashSet<std::string, IsEmptyFnString> hash_set;
+  ASSERT_TRUE(hash_set.begin() == hash_set.end());
+  static constexpr size_t count = 1000;
+  std::vector<std::string> strings;
+  for (size_t i = 0; i < count; ++i) {
+    // Insert a bunch of elements and make sure we can find them.
+    strings.push_back(RandomString(10));
+    hash_set.Insert(strings[i]);
+  }
+  // Make sure we visit each string exactly once.
+  std::map<std::string, size_t> found_count;
+  for (const std::string& s : hash_set) {
+    ++found_count[s];
+  }
+  for (size_t i = 0; i < count; ++i) {
+    ASSERT_EQ(found_count[strings[i]], 1U);
+  }
+  found_count.clear();
+  // Remove all the elements with iterator erase.
+  for (auto it = hash_set.begin(); it != hash_set.end();) {
+    ++found_count[*it];
+    it = hash_set.Erase(it);
+    ASSERT_EQ(hash_set.Verify(), 0U);
+  }
+  for (size_t i = 0; i < count; ++i) {
+    ASSERT_EQ(found_count[strings[i]], 1U);
+  }
+}
+
+TEST_F(HashSetTest, TestSwap) {
+  HashSet<std::string, IsEmptyFnString> hash_seta, hash_setb;
+  std::vector<std::string> strings;
+  static constexpr size_t count = 1000;
+  for (size_t i = 0; i < count; ++i) {
+    strings.push_back(RandomString(10));
+    hash_seta.Insert(strings[i]);
+  }
+  std::swap(hash_seta, hash_setb);
+  hash_seta.Insert("TEST");
+  hash_setb.Insert("TEST2");
+  for (size_t i = 0; i < count; ++i) {
+    strings.push_back(RandomString(10));
+    hash_seta.Insert(strings[i]);
+  }
+}
+
+TEST_F(HashSetTest, TestStress) {
+  HashSet<std::string, IsEmptyFnString> hash_set;
+  std::unordered_multiset<std::string> std_set;
+  std::vector<std::string> strings;
+  static constexpr size_t string_count = 2000;
+  static constexpr size_t operations = 100000;
+  static constexpr size_t target_size = 5000;
+  for (size_t i = 0; i < string_count; ++i) {
+    strings.push_back(RandomString(i % 10 + 1));
+  }
+  const size_t seed = time(nullptr);
+  SetSeed(seed);
+  LOG(INFO) << "Starting stress test with seed " << seed;
+  for (size_t i = 0; i < operations; ++i) {
+    ASSERT_EQ(hash_set.Size(), std_set.size());
+    size_t delta = std::abs(static_cast<ssize_t>(target_size) -
+                            static_cast<ssize_t>(hash_set.Size()));
+    size_t n = PRand();
+    if (n % target_size == 0) {
+      hash_set.Clear();
+      std_set.clear();
+      ASSERT_TRUE(hash_set.Empty());
+      ASSERT_TRUE(std_set.empty());
+    } else if (n % target_size < delta) {
+      // Skew towards adding elements until we are at the desired size.
+      const std::string& s = strings[PRand() % string_count];
+      hash_set.Insert(s);
+      std_set.insert(s);
+      ASSERT_EQ(*hash_set.Find(s), *std_set.find(s));
+    } else {
+      const std::string& s = strings[PRand() % string_count];
+      auto it1 = hash_set.Find(s);
+      auto it2 = std_set.find(s);
+      ASSERT_EQ(it1 == hash_set.end(), it2 == std_set.end());
+      if (it1 != hash_set.end()) {
+        ASSERT_EQ(*it1, *it2);
+        hash_set.Erase(it1);
+        std_set.erase(it2);
+      }
+    }
+  }
+}
+
+}  // namespace art
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index eb28d34..3704d83 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1676,25 +1676,24 @@
 void ClassLinker::VisitClassRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
   WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
   if ((flags & kVisitRootFlagAllRoots) != 0) {
-    for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
-      it.second.VisitRoot(callback, arg, 0, kRootStickyClass);
+    for (GcRoot<mirror::Class>& root : class_table_) {
+      root.VisitRoot(callback, arg, 0, kRootStickyClass);
+    }
+    for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
+      root.VisitRoot(callback, arg, 0, kRootStickyClass);
     }
   } else if ((flags & kVisitRootFlagNewRoots) != 0) {
-    for (auto& pair : new_class_roots_) {
-      mirror::Class* old_ref = pair.second.Read<kWithoutReadBarrier>();
-      pair.second.VisitRoot(callback, arg, 0, kRootStickyClass);
-      mirror::Class* new_ref = pair.second.Read<kWithoutReadBarrier>();
+    for (auto& root : new_class_roots_) {
+      mirror::Class* old_ref = root.Read<kWithoutReadBarrier>();
+      root.VisitRoot(callback, arg, 0, kRootStickyClass);
+      mirror::Class* new_ref = root.Read<kWithoutReadBarrier>();
       if (UNLIKELY(new_ref != old_ref)) {
         // Uh ohes, GC moved a root in the log. Need to search the class_table and update the
         // corresponding object. This is slow, but luckily for us, this may only happen with a
         // concurrent moving GC.
-        for (auto it = class_table_.lower_bound(pair.first), end = class_table_.end();
-            it != end && it->first == pair.first; ++it) {
-          // If the class stored matches the old class, update it to the new value.
-          if (old_ref == it->second.Read<kWithoutReadBarrier>()) {
-            it->second = GcRoot<mirror::Class>(new_ref);
-          }
-        }
+        auto it = class_table_.Find(GcRoot<mirror::Class>(old_ref));
+        class_table_.Erase(it);
+        class_table_.Insert(GcRoot<mirror::Class>(new_ref));
       }
     }
   }
@@ -1752,9 +1751,13 @@
   }
   // TODO: why isn't this a ReaderMutexLock?
   WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
-    mirror::Class* c = it.second.Read();
-    if (!visitor(c, arg)) {
+  for (GcRoot<mirror::Class>& root : class_table_) {
+    if (!visitor(root.Read(), arg)) {
+      return;
+    }
+  }
+  for (GcRoot<mirror::Class>& root : pre_zygote_class_table_) {
+    if (!visitor(root.Read(), arg)) {
       return;
     }
   }
@@ -1810,7 +1813,7 @@
       size_t class_table_size;
       {
         ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-        class_table_size = class_table_.size();
+        class_table_size = class_table_.Size() + pre_zygote_class_table_.Size();
       }
       mirror::Class* class_type = mirror::Class::GetJavaLangClass();
       mirror::Class* array_of_class = FindArrayClass(self, &class_type);
@@ -3235,8 +3238,7 @@
     LOG(INFO) << "Loaded class " << descriptor << source;
   }
   WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  mirror::Class* existing =
-      LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash);
+  mirror::Class* existing = LookupClassFromTableLocked(descriptor, klass->GetClassLoader(), hash);
   if (existing != nullptr) {
     return existing;
   }
@@ -3246,13 +3248,13 @@
     // is in the image.
     existing = LookupClassFromImage(descriptor);
     if (existing != nullptr) {
-      CHECK(klass == existing);
+      CHECK_EQ(klass, existing);
     }
   }
   VerifyObject(klass);
-  class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+  class_table_.InsertWithHash(GcRoot<mirror::Class>(klass), hash);
   if (log_new_class_table_roots_) {
-    new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+    new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
   }
   return nullptr;
 }
@@ -3272,14 +3274,8 @@
   CHECK(!existing->IsResolved()) << descriptor;
   CHECK_EQ(klass->GetStatus(), mirror::Class::kStatusResolving) << descriptor;
 
-  for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
-       it != end && it->first == hash; ++it) {
-    mirror::Class* klass = it->second.Read();
-    if (klass == existing) {
-      class_table_.erase(it);
-      break;
-    }
-  }
+  auto it = class_table_.Find(descriptor);
+  CHECK(it != class_table_.end());
 
   CHECK(!klass->IsTemp()) << descriptor;
   if (kIsDebugBuild && klass->GetClassLoader() == nullptr &&
@@ -3288,36 +3284,37 @@
     // is in the image.
     existing = LookupClassFromImage(descriptor);
     if (existing != nullptr) {
-      CHECK(klass == existing) << descriptor;
+      CHECK_EQ(klass, existing) << descriptor;
     }
   }
   VerifyObject(klass);
 
-  class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+  // Update the element in the hash set.
+  *it = GcRoot<mirror::Class>(klass);
   if (log_new_class_table_roots_) {
-    new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+    new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
   }
 
   return existing;
 }
 
-bool ClassLinker::RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) {
-  size_t hash = Hash(descriptor);
+bool ClassLinker::RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader) {
   WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
-       it != end && it->first == hash;
-       ++it) {
-    mirror::Class* klass = it->second.Read();
-    if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
-      class_table_.erase(it);
-      return true;
-    }
+  auto pair = std::make_pair(descriptor, class_loader);
+  auto it = class_table_.Find(pair);
+  if (it != class_table_.end()) {
+    class_table_.Erase(it);
+    return true;
+  }
+  it = pre_zygote_class_table_.Find(pair);
+  if (it != pre_zygote_class_table_.end()) {
+    pre_zygote_class_table_.Erase(it);
+    return true;
   }
   return false;
 }
 
-mirror::Class* ClassLinker::LookupClass(const char* descriptor,
-                                        const mirror::ClassLoader* class_loader) {
+mirror::Class* ClassLinker::LookupClass(const char* descriptor, mirror::ClassLoader* class_loader) {
   size_t hash = Hash(descriptor);
   {
     ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
@@ -3347,26 +3344,17 @@
 }
 
 mirror::Class* ClassLinker::LookupClassFromTableLocked(const char* descriptor,
-                                                       const mirror::ClassLoader* class_loader,
+                                                       mirror::ClassLoader* class_loader,
                                                        size_t hash) {
-  auto end = class_table_.end();
-  for (auto it = class_table_.lower_bound(hash); it != end && it->first == hash; ++it) {
-    mirror::Class* klass = it->second.Read();
-    if (klass->GetClassLoader() == class_loader && klass->DescriptorEquals(descriptor)) {
-      if (kIsDebugBuild) {
-        // Check for duplicates in the table.
-        for (++it; it != end && it->first == hash; ++it) {
-          mirror::Class* klass2 = it->second.Read();
-          CHECK(!(klass2->GetClassLoader() == class_loader &&
-              klass2->DescriptorEquals(descriptor)))
-              << PrettyClass(klass) << " " << klass << " " << klass->GetClassLoader() << " "
-              << PrettyClass(klass2) << " " << klass2 << " " << klass2->GetClassLoader();
-        }
-      }
-      return klass;
+  auto descriptor_pair = std::make_pair(descriptor, class_loader);
+  auto it = pre_zygote_class_table_.FindWithHash(descriptor_pair, hash);
+  if (it == pre_zygote_class_table_.end()) {
+    it = class_table_.FindWithHash(descriptor_pair, hash);
+    if (it == class_table_.end()) {
+      return nullptr;
     }
   }
-  return nullptr;
+  return it->Read();
 }
 
 static mirror::ObjectArray<mirror::DexCache>* GetImageDexCaches()
@@ -3398,12 +3386,12 @@
         size_t hash = Hash(descriptor);
         mirror::Class* existing = LookupClassFromTableLocked(descriptor, nullptr, hash);
         if (existing != nullptr) {
-          CHECK(existing == klass) << PrettyClassAndClassLoader(existing) << " != "
+          CHECK_EQ(existing, klass) << PrettyClassAndClassLoader(existing) << " != "
               << PrettyClassAndClassLoader(klass);
         } else {
-          class_table_.insert(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+          class_table_.Insert(GcRoot<mirror::Class>(klass));
           if (log_new_class_table_roots_) {
-            new_class_roots_.push_back(std::make_pair(hash, GcRoot<mirror::Class>(klass)));
+            new_class_roots_.push_back(GcRoot<mirror::Class>(klass));
           }
         }
       }
@@ -3413,6 +3401,13 @@
   self->EndAssertNoThreadSuspension(old_no_suspend_cause);
 }
 
+void ClassLinker::MoveClassTableToPreZygote() {
+  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+  DCHECK(pre_zygote_class_table_.Empty());
+  pre_zygote_class_table_ = std::move(class_table_);
+  class_table_.Clear();
+}
+
 mirror::Class* ClassLinker::LookupClassFromImage(const char* descriptor) {
   Thread* self = Thread::Current();
   const char* old_no_suspend_cause =
@@ -3445,14 +3440,32 @@
   if (dex_cache_image_class_lookup_required_) {
     MoveImageClassesToClassTable();
   }
-  size_t hash = Hash(descriptor);
-  ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  for (auto it = class_table_.lower_bound(hash), end = class_table_.end();
-      it != end && it->first == hash; ++it) {
-    mirror::Class* klass = it->second.Read();
-    if (klass->DescriptorEquals(descriptor)) {
-      result.push_back(klass);
+  WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
+  while (true) {
+    auto it = class_table_.Find(descriptor);
+    if (it == class_table_.end()) {
+      break;
     }
+    result.push_back(it->Read());
+    class_table_.Erase(it);
+  }
+  for (mirror::Class* k : result) {
+    class_table_.Insert(GcRoot<mirror::Class>(k));
+  }
+  size_t pre_zygote_start = result.size();
+  // Now handle the pre zygote table.
+  // Note: This dirties the pre-zygote table but shouldn't be an issue since LookupClasses is only
+  // called from the debugger.
+  while (true) {
+    auto it = pre_zygote_class_table_.Find(descriptor);
+    if (it == pre_zygote_class_table_.end()) {
+      break;
+    }
+    result.push_back(it->Read());
+    pre_zygote_class_table_.Erase(it);
+  }
+  for (size_t i = pre_zygote_start; i < result.size(); ++i) {
+    pre_zygote_class_table_.Insert(GcRoot<mirror::Class>(result[i]));
   }
 }
 
@@ -5668,9 +5681,8 @@
   std::vector<mirror::Class*> all_classes;
   {
     ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-    for (std::pair<const size_t, GcRoot<mirror::Class> >& it : class_table_) {
-      mirror::Class* klass = it.second.Read();
-      all_classes.push_back(klass);
+    for (GcRoot<mirror::Class>& it : class_table_) {
+      all_classes.push_back(it.Read());
     }
   }
 
@@ -5684,7 +5696,8 @@
     MoveImageClassesToClassTable();
   }
   ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  os << "Loaded classes: " << class_table_.size() << " allocated classes\n";
+  os << "Zygote loaded classes=" << pre_zygote_class_table_.Size() << " post zygote classes="
+     << class_table_.Size() << "\n";
 }
 
 size_t ClassLinker::NumLoadedClasses() {
@@ -5692,7 +5705,8 @@
     MoveImageClassesToClassTable();
   }
   ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
-  return class_table_.size();
+  // Only return non-zygote classes since these are the ones apps care about.
+  return class_table_.Size();
 }
 
 pid_t ClassLinker::GetClassesLockOwner() {
@@ -5715,4 +5729,41 @@
   class_roots->Set<false>(class_root, klass);
 }
 
+std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& root)
+    const {
+  std::string temp;
+  return Hash(root.Read()->GetDescriptor(&temp));
+}
+
+bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
+                                                        const GcRoot<mirror::Class>& b) {
+  if (a.Read()->GetClassLoader() != b.Read()->GetClassLoader()) {
+    return false;
+  }
+  std::string temp;
+  return a.Read()->DescriptorEquals(b.Read()->GetDescriptor(&temp));
+}
+
+std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(
+    const std::pair<const char*, mirror::ClassLoader*>& element) const {
+  return Hash(element.first);
+}
+
+bool ClassLinker::ClassDescriptorHashEquals::operator()(
+    const GcRoot<mirror::Class>& a, const std::pair<const char*, mirror::ClassLoader*>& b) {
+  if (a.Read()->GetClassLoader() != b.second) {
+    return false;
+  }
+  return a.Read()->DescriptorEquals(b.first);
+}
+
+bool ClassLinker::ClassDescriptorHashEquals::operator()(const GcRoot<mirror::Class>& a,
+                                                        const char* descriptor) {
+  return a.Read()->DescriptorEquals(descriptor);
+}
+
+std::size_t ClassLinker::ClassDescriptorHashEquals::operator()(const char* descriptor) const {
+  return Hash(descriptor);
+}
+
 }  // namespace art
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 9e86aa2..5fdf364 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -22,6 +22,7 @@
 #include <vector>
 
 #include "base/allocator.h"
+#include "base/hash_set.h"
 #include "base/macros.h"
 #include "base/mutex.h"
 #include "dex_file.h"
@@ -101,7 +102,7 @@
 
  // Finds a class by its descriptor, returning NULL if it wasn't loaded
   // by the given 'class_loader'.
-  mirror::Class* LookupClass(const char* descriptor, const mirror::ClassLoader* class_loader)
+  mirror::Class* LookupClass(const char* descriptor, mirror::ClassLoader* class_loader)
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -114,7 +115,7 @@
 
   // General class unloading is not supported, this is used to prune
   // unwanted classes during image writing.
-  bool RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader)
+  bool RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader)
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -403,6 +404,17 @@
     return class_roots;
   }
 
+  // Move all of the image classes into the class table for faster lookups.
+  void MoveImageClassesToClassTable()
+      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  // Move the class table to the pre-zygote table to reduce memory usage. This works by
+  // ensuring that no more classes are ever added to the pre-zygote table, which means its
+  // pages always remain shared dirty instead of private dirty.
+  void MoveClassTableToPreZygote()
+      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   bool FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -645,7 +657,7 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   mirror::Class* LookupClassFromTableLocked(const char* descriptor,
-                                            const mirror::ClassLoader* class_loader,
+                                            mirror::ClassLoader* class_loader,
                                             size_t hash)
       SHARED_LOCKS_REQUIRED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
 
@@ -653,9 +665,6 @@
       LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void MoveImageClassesToClassTable() LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      LOCKS_EXCLUDED(Locks::classlinker_classes_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   mirror::Class* LookupClassFromImage(const char* descriptor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -678,15 +687,43 @@
   std::vector<GcRoot<mirror::DexCache>> dex_caches_ GUARDED_BY(dex_lock_);
   std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_);
 
+  class ClassDescriptorHashEquals {
+   public:
+    // Same class loader and descriptor.
+    std::size_t operator()(const GcRoot<mirror::Class>& root) const NO_THREAD_SAFETY_ANALYSIS;
+    bool operator()(const GcRoot<mirror::Class>& a, const GcRoot<mirror::Class>& b)
+        NO_THREAD_SAFETY_ANALYSIS;
+    // Same class loader and descriptor.
+    std::size_t operator()(const std::pair<const char*, mirror::ClassLoader*>& element) const
+        NO_THREAD_SAFETY_ANALYSIS;
+    bool operator()(const GcRoot<mirror::Class>& a,
+                    const std::pair<const char*, mirror::ClassLoader*>& b)
+        NO_THREAD_SAFETY_ANALYSIS;
+    // Same descriptor.
+    bool operator()(const GcRoot<mirror::Class>& a, const char* descriptor)
+        NO_THREAD_SAFETY_ANALYSIS;
+    std::size_t operator()(const char* descriptor) const NO_THREAD_SAFETY_ANALYSIS;
+  };
+  class GcRootEmptyFn {
+   public:
+    void MakeEmpty(GcRoot<mirror::Class>& item) const {
+      item = GcRoot<mirror::Class>();
+    }
+    bool IsEmpty(const GcRoot<mirror::Class>& item) const {
+      return item.IsNull();
+    }
+  };
 
-  // multimap from a string hash code of a class descriptor to
-  // mirror::Class* instances. Results should be compared for a matching
-  // Class::descriptor_ and Class::class_loader_.
-  typedef AllocationTrackingMultiMap<size_t, GcRoot<mirror::Class>, kAllocatorTagClassTable> Table;
+  // Hash set which hashes class descriptors and compares both descriptors and class loaders.
+  // Results should be compared for a matching class descriptor and class loader.
+  typedef HashSet<GcRoot<mirror::Class>, GcRootEmptyFn, ClassDescriptorHashEquals,
+      ClassDescriptorHashEquals, TrackingAllocator<GcRoot<mirror::Class>, kAllocatorTagClassTable>>
+      Table;
   // This contains strong roots. To enable concurrent root scanning of
   // the class table, be careful to use a read barrier when accessing this.
   Table class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
-  std::vector<std::pair<size_t, GcRoot<mirror::Class>>> new_class_roots_;
+  Table pre_zygote_class_table_ GUARDED_BY(Locks::classlinker_classes_lock_);
+  std::vector<GcRoot<mirror::Class>> new_class_roots_;
 
   // Do we need to search dex caches to find image classes?
   bool dex_cache_image_class_lookup_required_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b112b5f..0b0200f 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1910,6 +1910,7 @@
     return;
   }
   Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote();
+  Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote();
   VLOG(heap) << "Starting PreZygoteFork";
   // Trim the pages at the end of the non moving space.
   non_moving_space_->Trim();
diff --git a/runtime/gc_root-inl.h b/runtime/gc_root-inl.h
index 2661e54..a42ec08 100644
--- a/runtime/gc_root-inl.h
+++ b/runtime/gc_root-inl.h
@@ -25,14 +25,9 @@
 
 template<class MirrorType>
 template<ReadBarrierOption kReadBarrierOption>
-inline MirrorType* GcRoot<MirrorType>::Read() {
+inline MirrorType* GcRoot<MirrorType>::Read() const {
   return ReadBarrier::BarrierForRoot<MirrorType, kReadBarrierOption>(&root_);
 }
 
-template<class MirrorType>
-inline void GcRoot<MirrorType>::Assign(MirrorType* value) {
-  root_ = value;
-}
-
 }  // namespace art
 #endif  // ART_RUNTIME_GC_ROOT_INL_H_
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 8999ff9..aee5586 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -27,10 +27,9 @@
 class PACKED(4) GcRoot {
  public:
   template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
-  ALWAYS_INLINE MirrorType* Read() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  ALWAYS_INLINE void Assign(MirrorType* value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ALWAYS_INLINE MirrorType* Read() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) {
+  void VisitRoot(RootCallback* callback, void* arg, uint32_t thread_id, RootType root_type) const {
     DCHECK(!IsNull());
     callback(reinterpret_cast<mirror::Object**>(&root_), arg, thread_id, root_type);
     DCHECK(!IsNull());
@@ -54,7 +53,7 @@
   }
 
  private:
-  MirrorType* root_;
+  mutable MirrorType* root_;
 };
 
 }  // namespace art
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 9a280db..95c622a 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -56,13 +56,13 @@
 void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
   MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
   if ((flags & kVisitRootFlagAllRoots) != 0) {
-    strong_interns_.VisitRoots(callback, arg, flags);
+    strong_interns_.VisitRoots(callback, arg);
   } else if ((flags & kVisitRootFlagNewRoots) != 0) {
     for (auto& root : new_strong_intern_roots_) {
       mirror::String* old_ref = root.Read<kWithoutReadBarrier>();
       root.VisitRoot(callback, arg, 0, kRootInternedString);
       mirror::String* new_ref = root.Read<kWithoutReadBarrier>();
-      if (UNLIKELY(new_ref != old_ref)) {
+      if (new_ref != old_ref) {
         // The GC moved a root in the log. Need to search the strong interns and update the
         // corresponding object. This is slow, but luckily for us, this may only happen with a
         // concurrent moving GC.
@@ -71,7 +71,6 @@
       }
     }
   }
-
   if ((flags & kVisitRootFlagClearRootLog) != 0) {
     new_strong_intern_roots_.clear();
   }
@@ -190,15 +189,15 @@
     const DexFile* dex_file = dex_cache->GetDexFile();
     // Binary search the dex file for the string index.
     const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str());
-    if (string_id != NULL) {
+    if (string_id != nullptr) {
       uint32_t string_idx = dex_file->GetIndexForStringId(*string_id);
       mirror::String* image = dex_cache->GetResolvedString(string_idx);
-      if (image != NULL) {
+      if (image != nullptr) {
         return image;
       }
     }
   }
-  return NULL;
+  return nullptr;
 }
 
 void InternTable::AllowNewInterns() {
@@ -215,58 +214,36 @@
 }
 
 mirror::String* InternTable::Insert(mirror::String* s, bool is_strong) {
+  if (s == nullptr) {
+    return nullptr;
+  }
   Thread* self = Thread::Current();
   MutexLock mu(self, *Locks::intern_table_lock_);
-
-  DCHECK(s != NULL);
-
   while (UNLIKELY(!allow_new_interns_)) {
     new_intern_condition_.WaitHoldingLocks(self);
   }
-
-  if (is_strong) {
-    // Check the strong table for a match.
-    mirror::String* strong = LookupStrong(s);
-    if (strong != NULL) {
-      return strong;
-    }
-
-    // Check the image for a match.
-    mirror::String* image = LookupStringFromImage(s);
-    if (image != NULL) {
-      return InsertStrong(image);
-    }
-
-    // There is no match in the strong table, check the weak table.
-    mirror::String* weak = LookupWeak(s);
-    if (weak != NULL) {
-      // A match was found in the weak table. Promote to the strong table.
-      RemoveWeak(weak);
-      return InsertStrong(weak);
-    }
-
-    // No match in the strong table or the weak table. Insert into the strong
-    // table.
-    return InsertStrong(s);
-  }
-
   // Check the strong table for a match.
   mirror::String* strong = LookupStrong(s);
-  if (strong != NULL) {
+  if (strong != nullptr) {
     return strong;
   }
   // Check the image for a match.
   mirror::String* image = LookupStringFromImage(s);
-  if (image != NULL) {
-    return InsertWeak(image);
+  if (image != nullptr) {
+    return is_strong ? InsertStrong(image) : InsertWeak(image);
   }
-  // Check the weak table for a match.
+  // There is no match in the strong table; check the weak table.
   mirror::String* weak = LookupWeak(s);
-  if (weak != NULL) {
+  if (weak != nullptr) {
+    if (is_strong) {
+      // A match was found in the weak table. Promote to the strong table.
+      RemoveWeak(weak);
+      return InsertStrong(weak);
+    }
     return weak;
   }
-  // Insert into the weak table.
-  return InsertWeak(s);
+  // No match in the strong table or the weak table. Insert into the strong / weak table.
+  return is_strong ? InsertStrong(s) : InsertWeak(s);
 }
 
 mirror::String* InternTable::InternStrong(int32_t utf16_length, const char* utf8_data) {
@@ -281,16 +258,10 @@
 }
 
 mirror::String* InternTable::InternStrong(mirror::String* s) {
-  if (s == nullptr) {
-    return nullptr;
-  }
   return Insert(s, true);
 }
 
 mirror::String* InternTable::InternWeak(mirror::String* s) {
-  if (s == nullptr) {
-    return nullptr;
-  }
   return Insert(s, false);
 }
 
@@ -304,64 +275,63 @@
   weak_interns_.SweepWeaks(callback, arg);
 }
 
-std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) {
+std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const {
   if (kIsDebugBuild) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  return static_cast<size_t>(
-      const_cast<GcRoot<mirror::String>&>(root).Read<kWithoutReadBarrier>()->GetHashCode());
+  return static_cast<size_t>(root.Read()->GetHashCode());
 }
 
 bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
-                                               const GcRoot<mirror::String>& b) {
+                                               const GcRoot<mirror::String>& b) const {
   if (kIsDebugBuild) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  return const_cast<GcRoot<mirror::String>&>(a).Read<kWithoutReadBarrier>()->Equals(
-      const_cast<GcRoot<mirror::String>&>(b).Read<kWithoutReadBarrier>());
+  return a.Read()->Equals(b.Read());
 }
 
 void InternTable::Table::Remove(mirror::String* s) {
-  auto it = post_zygote_table_.find(GcRoot<mirror::String>(s));
+  auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
   if (it != post_zygote_table_.end()) {
-    post_zygote_table_.erase(it);
+    post_zygote_table_.Erase(it);
   } else {
-    it = pre_zygote_table_.find(GcRoot<mirror::String>(s));
+    it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
     DCHECK(it != pre_zygote_table_.end());
-    pre_zygote_table_.erase(it);
+    pre_zygote_table_.Erase(it);
   }
 }
 
 mirror::String* InternTable::Table::Find(mirror::String* s) {
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
-  auto it = pre_zygote_table_.find(GcRoot<mirror::String>(s));
-  if (LIKELY(it != pre_zygote_table_.end())) {
-    return const_cast<GcRoot<mirror::String>&>(*it).Read<kWithReadBarrier>();
+  auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s));
+  if (it != pre_zygote_table_.end()) {
+    return it->Read();
   }
-  it = post_zygote_table_.find(GcRoot<mirror::String>(s));
-  if (LIKELY(it != post_zygote_table_.end())) {
-    return const_cast<GcRoot<mirror::String>&>(*it).Read<kWithReadBarrier>();
+  it = post_zygote_table_.Find(GcRoot<mirror::String>(s));
+  if (it != post_zygote_table_.end()) {
+    return it->Read();
   }
   return nullptr;
 }
 
 void InternTable::Table::SwapPostZygoteWithPreZygote() {
-  CHECK(pre_zygote_table_.empty());
+  CHECK(pre_zygote_table_.Empty());
   std::swap(pre_zygote_table_, post_zygote_table_);
+  VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table";
 }
 
 void InternTable::Table::Insert(mirror::String* s) {
   // Always insert into the post zygote table; this gets swapped to become the pre zygote
   // table when we create the zygote.
-  post_zygote_table_.insert(GcRoot<mirror::String>(s));
+  post_zygote_table_.Insert(GcRoot<mirror::String>(s));
 }
 
-void InternTable::Table::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
+void InternTable::Table::VisitRoots(RootCallback* callback, void* arg) {
   for (auto& intern : pre_zygote_table_) {
-    const_cast<GcRoot<mirror::String>&>(intern).VisitRoot(callback, arg, 0, kRootInternedString);
+    intern.VisitRoot(callback, arg, 0, kRootInternedString);
   }
   for (auto& intern : post_zygote_table_) {
-    const_cast<GcRoot<mirror::String>&>(intern).VisitRoot(callback, arg, 0, kRootInternedString);
+    intern.VisitRoot(callback, arg, 0, kRootInternedString);
   }
 }
 
@@ -373,16 +343,19 @@
 void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg) {
   for (auto it = set->begin(), end = set->end(); it != end;) {
     // This does not need a read barrier because this is called by GC.
-    GcRoot<mirror::String>& root = const_cast<GcRoot<mirror::String>&>(*it);
-    mirror::Object* object = root.Read<kWithoutReadBarrier>();
+    mirror::Object* object = it->Read<kWithoutReadBarrier>();
     mirror::Object* new_object = callback(object, arg);
     if (new_object == nullptr) {
-      it = set->erase(it);
+      it = set->Erase(it);
     } else {
-      root.Assign(down_cast<mirror::String*>(new_object));
+      *it = GcRoot<mirror::String>(new_object->AsString());
       ++it;
     }
   }
 }
 
+size_t InternTable::Table::Size() const {
+  return pre_zygote_table_.Size() + post_zygote_table_.Size();
+}
+
 }  // namespace art
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 0bff7b9..371d3f7 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -20,6 +20,7 @@
 #include <unordered_set>
 
 #include "base/allocator.h"
+#include "base/hash_set.h"
 #include "base/mutex.h"
 #include "gc_root.h"
 #include "object_callbacks.h"
@@ -98,10 +99,19 @@
  private:
   class StringHashEquals {
    public:
-    std::size_t operator()(const GcRoot<mirror::String>& root) NO_THREAD_SAFETY_ANALYSIS;
-    bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b)
+    std::size_t operator()(const GcRoot<mirror::String>& root) const NO_THREAD_SAFETY_ANALYSIS;
+    bool operator()(const GcRoot<mirror::String>& a, const GcRoot<mirror::String>& b) const
         NO_THREAD_SAFETY_ANALYSIS;
   };
+  class GcRootEmptyFn {
+   public:
+    void MakeEmpty(GcRoot<mirror::String>& item) const {
+      item = GcRoot<mirror::String>();
+    }
+    bool IsEmpty(const GcRoot<mirror::String>& item) const {
+      return item.IsNull();
+    }
+  };
 
   // Table which holds pre zygote and post zygote interned strings. There is one instance for
   // weak interns and strong interns.
@@ -114,19 +124,17 @@
     void Remove(mirror::String* s)
         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
         EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
-    void VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags)
+    void VisitRoots(RootCallback* callback, void* arg)
         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
         EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
     void SweepWeaks(IsMarkedCallback* callback, void* arg)
         SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
         EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
     void SwapPostZygoteWithPreZygote() EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
-    size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_) {
-      return pre_zygote_table_.size() + post_zygote_table_.size();
-    }
+    size_t Size() const EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_);
 
    private:
-    typedef std::unordered_set<GcRoot<mirror::String>, StringHashEquals, StringHashEquals,
+    typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
         TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
 
     void SweepWeaks(UnorderedSet* set, IsMarkedCallback* callback, void* arg)
@@ -141,6 +149,7 @@
     UnorderedSet post_zygote_table_;
   };
 
+  // Inserts s if non-null, otherwise returns nullptr.
   mirror::String* Insert(mirror::String* s, bool is_strong)
       LOCKS_EXCLUDED(Locks::intern_table_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 04cc240..053769a 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -418,6 +418,7 @@
   if (IsZygote()) {
     ScopedObjectAccess soa(self);
     Runtime::Current()->GetInternTable()->AddImageStringsToTable(heap_->GetImageSpace());
+    Runtime::Current()->GetClassLinker()->MoveImageClassesToClassTable();
   }
 
   if (!IsImageDex2OatEnabled() || !Runtime::Current()->GetHeap()->HasImageSpace()) {