Merge "ART: Fix Mips64 JNI Calling Convention"
diff --git a/runtime/base/hash_set.h b/runtime/base/hash_set.h
index f2b1cc0..4819f06 100644
--- a/runtime/base/hash_set.h
+++ b/runtime/base/hash_set.h
@@ -127,8 +127,8 @@
   using size_type = size_t;
   using difference_type = ptrdiff_t;
 
-  static constexpr double kDefaultMinLoadFactor = 0.5;
-  static constexpr double kDefaultMaxLoadFactor = 0.9;
+  static constexpr double kDefaultMinLoadFactor = 0.4;
+  static constexpr double kDefaultMaxLoadFactor = 0.7;
   static constexpr size_t kMinBuckets = 1000;
 
   // If we don't own the data, this will create a new array which owns the data.
@@ -138,14 +138,18 @@
     elements_until_expand_ = 0;
   }
 
-  HashSet()
+  HashSet() : HashSet(kDefaultMinLoadFactor, kDefaultMaxLoadFactor) {}
+
+  HashSet(double min_load_factor, double max_load_factor)
       : num_elements_(0u),
         num_buckets_(0u),
         elements_until_expand_(0u),
         owns_data_(false),
         data_(nullptr),
-        min_load_factor_(kDefaultMinLoadFactor),
-        max_load_factor_(kDefaultMaxLoadFactor) {
+        min_load_factor_(min_load_factor),
+        max_load_factor_(max_load_factor) {
+    DCHECK_GT(min_load_factor, 0.0);
+    DCHECK_LT(max_load_factor, 1.0);
   }
 
   explicit HashSet(const allocator_type& alloc)
@@ -459,6 +463,31 @@
     return errors;
   }
 
+  double GetMinLoadFactor() const {
+    return min_load_factor_;
+  }
+
+  double GetMaxLoadFactor() const {
+    return max_load_factor_;
+  }
+
+  // Change the load factor of the hash set. If the current load factor is greater than the max
+  // specified, then we resize the hash table storage.
+  void SetLoadFactor(double min_load_factor, double max_load_factor) {
+    DCHECK_LT(min_load_factor, max_load_factor);
+    DCHECK_GT(min_load_factor, 0.0);
+    DCHECK_LT(max_load_factor, 1.0);
+    min_load_factor_ = min_load_factor;
+    max_load_factor_ = max_load_factor;
+    elements_until_expand_ = NumBuckets() * max_load_factor_;
+    // If the current load factor exceeds the new maximum, resize so that the load factor
+    // becomes the mean of the minimum and maximum.
+    const double load_factor = CalculateLoadFactor();
+    if (load_factor > max_load_factor_) {
+      Resize(Size() / ((min_load_factor_ + max_load_factor_) * 0.5));
+    }
+  }
+
  private:
   T& ElementForIndex(size_t index) {
     DCHECK_LT(index, NumBuckets());
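[Reviewer note] A standalone sketch of the resize math in SetLoadFactor() above, assuming a
simplified table (SimpleHashSet is hypothetical, not the ART class, and element rehashing is
omitted): when the current load factor exceeds the new maximum, the bucket count is chosen so
that the load factor lands at the midpoint of the new [min, max] range.

    #include <cassert>
    #include <cstddef>
    #include <iostream>

    // Hypothetical sketch; the real HashSet::Resize also rehashes every element.
    struct SimpleHashSet {
      size_t num_elements = 0;
      size_t num_buckets = 0;
      double min_load_factor = 0.4;
      double max_load_factor = 0.7;

      double LoadFactor() const {
        return num_buckets == 0 ? 0.0 : static_cast<double>(num_elements) / num_buckets;
      }

      void SetLoadFactor(double min_lf, double max_lf) {
        assert(0.0 < min_lf && min_lf < max_lf && max_lf < 1.0);
        min_load_factor = min_lf;
        max_load_factor = max_lf;
        if (LoadFactor() > max_load_factor) {
          // buckets = elements / mean(min, max), so the new load factor is the mean.
          num_buckets = static_cast<size_t>(
              num_elements / ((min_load_factor + max_load_factor) * 0.5));
        }
      }
    };

    int main() {
      SimpleHashSet s{/*num_elements=*/700, /*num_buckets=*/1000};  // load factor 0.7
      s.SetLoadFactor(0.1, 0.3);                                    // 0.7 > 0.3 -> resize
      std::cout << s.num_buckets << " buckets\n";                   // 700 / 0.2 = 3500
    }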
diff --git a/runtime/base/hash_set_test.cc b/runtime/base/hash_set_test.cc
index 6d2c5e0..743e98e 100644
--- a/runtime/base/hash_set_test.cc
+++ b/runtime/base/hash_set_test.cc
@@ -196,6 +196,24 @@
   }
 }
 
+TEST_F(HashSetTest, TestLoadFactor) {
+  HashSet<std::string, IsEmptyFnString> hash_set;
+  static constexpr size_t kStringCount = 1000;
+  static constexpr double kEpsilon = 0.01;
+  for (size_t i = 0; i < kStringCount; ++i) {
+    hash_set.Insert(RandomString(i % 10 + 1));
+  }
+  // The load factor starts within the default bounds; changing it should resize into range.
+  EXPECT_GE(hash_set.CalculateLoadFactor() + kEpsilon, hash_set.GetMinLoadFactor());
+  EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+  hash_set.SetLoadFactor(0.1, 0.3);
+  EXPECT_DOUBLE_EQ(0.1, hash_set.GetMinLoadFactor());
+  EXPECT_DOUBLE_EQ(0.3, hash_set.GetMaxLoadFactor());
+  EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+  hash_set.SetLoadFactor(0.6, 0.8);
+  EXPECT_LE(hash_set.CalculateLoadFactor() - kEpsilon, hash_set.GetMaxLoadFactor());
+}
+
 TEST_F(HashSetTest, TestStress) {
   HashSet<std::string, IsEmptyFnString> hash_set;
   std::unordered_multiset<std::string> std_set;
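[Reviewer note] Usage sketch of the new two-argument constructor (types as in hash_set_test.cc;
this mirrors the ClassTable change below, which passes the runtime's tuned factors the same way):

    // Build a denser-than-default table up front instead of retuning it later
    // via SetLoadFactor().
    HashSet<std::string, IsEmptyFnString> dense_set(/*min_load_factor=*/0.5,
                                                    /*max_load_factor=*/0.8);
    dense_set.Insert("foo");
    CHECK_LE(dense_set.CalculateLoadFactor(), 0.8);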
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 4ce52f1..395649e 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -1213,13 +1213,8 @@
       dex_file.NumMethodIds() != 0u || dex_file.NumFieldIds() != 0u) {
     // NOTE: We "leak" the raw_arrays because we never destroy the dex cache.
     DCHECK(image_pointer_size_ == 4u || image_pointer_size_ == 8u);
-    // When cross-compiling for a 32-bit target on a 64-bit host, we need these arrays
-    // in the low 4GiB address space so that we can store pointers in 32-bit fields.
-    // This is conveniently provided by the linear allocator.
-    raw_arrays = reinterpret_cast<uint8_t*>(
-        (sizeof(void*) == 8u && image_pointer_size_ == 4u)
-        ? Runtime::Current()->GetLinearAlloc()->Alloc(self, layout.Size())  // Zero-initialized.
-        : linear_alloc->Alloc(self, layout.Size()));  // Zero-initialized.
+    // Zero-initialized.
+    raw_arrays = reinterpret_cast<uint8_t*>(linear_alloc->Alloc(self, layout.Size()));
   }
   GcRoot<mirror::String>* strings = (dex_file.NumStringIds() == 0u) ? nullptr :
       reinterpret_cast<GcRoot<mirror::String>*>(raw_arrays + layout.StringsOffset());
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index a70967d4..fd30a46 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -529,6 +529,8 @@
       SHARED_REQUIRES(Locks::mutator_lock_)
       REQUIRES(!Locks::classlinker_classes_lock_);
 
+  // Unlike GetOrCreateAllocatorForClassLoader, GetAllocatorForClassLoader asserts that the
+  // allocator for this class loader has already been created.
   static LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
       SHARED_REQUIRES(Locks::mutator_lock_);
 
diff --git a/runtime/class_table.cc b/runtime/class_table.cc
index fc8e6c4..4b0cbc8 100644
--- a/runtime/class_table.cc
+++ b/runtime/class_table.cc
@@ -21,7 +21,9 @@
 namespace art {
 
 ClassTable::ClassTable() {
-  classes_.push_back(ClassSet());
+  Runtime* const runtime = Runtime::Current();
+  classes_.push_back(ClassSet(runtime->GetHashTableMinLoadFactor(),
+                              runtime->GetHashTableMaxLoadFactor()));
 }
 
 void ClassTable::FreezeSnapshot() {
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 179353e..f4658d5 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -459,4 +459,12 @@
   }
 }
 
+InternTable::Table::Table() {
+  Runtime* const runtime = Runtime::Current();
+  pre_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+                                  runtime->GetHashTableMaxLoadFactor());
+  post_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(),
+                                   runtime->GetHashTableMaxLoadFactor());
+}
+
 }  // namespace art
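[Reviewer note] Design note: the member tables are default-constructed before the constructor
body runs, so the body retunes them with SetLoadFactor(). An equivalent alternative (sketch,
assuming the members are HashSet instances exposing the new two-argument constructor) would
initialize them directly:

    InternTable::Table::Table()
        : pre_zygote_table_(Runtime::Current()->GetHashTableMinLoadFactor(),
                            Runtime::Current()->GetHashTableMaxLoadFactor()),
          post_zygote_table_(Runtime::Current()->GetHashTableMinLoadFactor(),
                             Runtime::Current()->GetHashTableMaxLoadFactor()) {}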
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index 24c5af9..3a4e8d8 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -146,6 +146,7 @@
   // weak interns and strong interns.
   class Table {
    public:
+    Table();
     mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
         REQUIRES(Locks::intern_table_lock_);
     void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index cd09bee..6c459a3 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -140,6 +140,12 @@
 
 // If a signal isn't handled properly, enable a handler that attempts to dump the Java stack.
 static constexpr bool kEnableJavaStackTraceHandler = false;
+// Tuned by compiling GmsCore under perf and measuring time spent in DescriptorEquals for class
+// linking.
+static constexpr double kLowMemoryMinLoadFactor = 0.5;
+static constexpr double kLowMemoryMaxLoadFactor = 0.8;
+static constexpr double kNormalMinLoadFactor = 0.4;
+static constexpr double kNormalMaxLoadFactor = 0.7;
 Runtime* Runtime::instance_ = nullptr;
 
 struct TraceConfig {
@@ -200,7 +206,9 @@
       no_sig_chain_(false),
       is_native_bridge_loaded_(false),
       zygote_max_failed_boots_(0),
-      experimental_flags_(ExperimentalFlags::kNone) {
+      experimental_flags_(ExperimentalFlags::kNone),
+      oat_file_manager_(nullptr),
+      is_low_memory_mode_(false) {
   CheckAsmSupportOffsetsAndSizes();
   std::fill(callee_save_methods_, callee_save_methods_ + arraysize(callee_save_methods_), 0u);
 }
@@ -886,6 +894,7 @@
 
   zygote_max_failed_boots_ = runtime_options.GetOrDefault(Opt::ZygoteMaxFailedBoots);
   experimental_flags_ = runtime_options.GetOrDefault(Opt::Experimental);
+  is_low_memory_mode_ = runtime_options.Exists(Opt::LowMemoryMode);
 
   XGcOption xgc_option = runtime_options.GetOrDefault(Opt::GcOption);
   ATRACE_BEGIN("CreateHeap");
@@ -1804,4 +1813,12 @@
       : new LinearAlloc(arena_pool_.get());
 }
 
+double Runtime::GetHashTableMinLoadFactor() const {
+  return is_low_memory_mode_ ? kLowMemoryMinLoadFactor : kNormalMinLoadFactor;
+}
+
+double Runtime::GetHashTableMaxLoadFactor() const {
+  return is_low_memory_mode_ ? kLowMemoryMaxLoadFactor : kNormalMaxLoadFactor;
+}
+
 }  // namespace art
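[Reviewer note] Back-of-the-envelope illustration of the trade-off these constants encode
(assumed element count, not from this change): a table holding N elements needs roughly
N / max_load_factor buckets, so the higher low-memory factors save memory at the cost of
longer probe sequences.

    #include <cstddef>
    #include <iostream>

    int main() {
      const size_t kElements = 100000;
      const double kNormalMax = 0.7;     // kNormalMaxLoadFactor
      const double kLowMemoryMax = 0.8;  // kLowMemoryMaxLoadFactor
      std::cout << "normal:     ~" << static_cast<size_t>(kElements / kNormalMax)
                << " buckets\n";  // ~142857
      std::cout << "low memory: ~" << static_cast<size_t>(kElements / kLowMemoryMax)
                << " buckets\n";  // 125000, about 12% fewer
    }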
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 458f08a..7b1fdb2 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -580,6 +580,9 @@
     return *oat_file_manager_;
   }
 
+  double GetHashTableMinLoadFactor() const;
+  double GetHashTableMaxLoadFactor() const;
+
  private:
   static void InitPlatformSignalHandlers();
 
@@ -780,6 +783,9 @@
   // Oat file manager, keeps track of what oat files are open.
   OatFileManager* oat_file_manager_;
 
+  // Whether we are running on a low-RAM device.
+  bool is_low_memory_mode_;
+
   DISALLOW_COPY_AND_ASSIGN(Runtime);
 };
 std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);