Merge "Avoid tripping up debuggered with the fatal crash abort"
diff --git a/compiler/dex/mir_analysis.cc b/compiler/dex/mir_analysis.cc
index 7ce8f69..8ef80fa 100644
--- a/compiler/dex/mir_analysis.cc
+++ b/compiler/dex/mir_analysis.cc
@@ -1004,6 +1004,11 @@
     return false;
   }
 
+  // Contains a pattern we don't want to compile?
+  if (punt_to_interpreter_) {
+    return true;
+  }
+
   if (compiler_filter == CompilerOptions::kInterpretOnly) {
     LOG(WARNING) << "InterpretOnly should ideally be filtered out prior to parsing.";
     return true;
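
Note that punt_to_interpreter_ is set while the MIR graph is built (see the
mir_graph.cc hunk below) and consumed here before any per-method size
heuristics run. A minimal sketch of the resulting early-out order, with an
assumed enclosing signature (only the two checks are taken from this hunk):

    // Sketch: early outs in MIRGraph::SkipCompilation (assumed signature;
    // the size/complexity heuristics that follow are elided).
    bool MIRGraph::SkipCompilation(CompilerOptions::CompilerFilter compiler_filter) {
      if (punt_to_interpreter_) {
        return true;  // Parsing found a pattern the Quick backend can't handle.
      }
      if (compiler_filter == CompilerOptions::kInterpretOnly) {
        LOG(WARNING) << "InterpretOnly should ideally be filtered out prior to parsing.";
        return true;
      }
      // ... remaining heuristics ...
      return false;
    }
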
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index e4550d1..2bfc154 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -86,7 +86,8 @@
       forward_branches_(0),
       compiler_temps_(arena, 6, kGrowableArrayMisc),
       num_non_special_compiler_temps_(0),
-      max_available_non_special_compiler_temps_(0) {
+      max_available_non_special_compiler_temps_(0),
+      punt_to_interpreter_(false) {
   try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
   max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
       - std::abs(static_cast<int>(kVRegTempBaseReg));
@@ -610,6 +611,7 @@
     }
 
     int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
+    int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);
 
     uint64_t df_flags = oat_data_flow_attributes_[insn->dalvikInsn.opcode];
 
@@ -676,6 +678,19 @@
     } else if (flags & Instruction::kSwitch) {
       cur_block = ProcessCanSwitch(cur_block, insn, current_offset_, width, flags);
     }
+    if (verify_flags & Instruction::kVerifyVarArgRange) {
+      /*
+       * The Quick backend's runtime model includes a gap between a method's
+       * argument ("in") vregs and the rest of its vregs.  Handling a range instruction
+       * which spans the gap is somewhat complicated, and should not happen
+       * in normal usage of dx.  Punt to the interpreter.
+       */
+      int first_reg_in_range = insn->dalvikInsn.vC;
+      int last_reg_in_range = first_reg_in_range + insn->dalvikInsn.vA - 1;
+      if (IsInVReg(first_reg_in_range) != IsInVReg(last_reg_in_range)) {
+        punt_to_interpreter_ = true;
+      }
+    }
     current_offset_ += width;
     BasicBlock *next_block = FindBlock(current_offset_, /* split */ false, /* create */
                                       false, /* immed_pred_block_p */ NULL);
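
A concrete, hypothetical instance of the punted pattern: consider a method
with registers_size = 5 and ins_size = 2, so cu_->num_regs == 3 and vregs
v0-v2 are locals while v3-v4 hold the incoming arguments.

    // invoke-virtual/range {v2 .. v4}   => vA = 3 (count), vC = 2 (first reg)
    //
    // first_reg_in_range = 2  -> a local, IsInVReg(2) == false
    // last_reg_in_range  = 4  -> an "in", IsInVReg(4) == true
    //
    // The range straddles the gap between locals and ins in the Quick frame,
    // so punt_to_interpreter_ is set and the method is interpreted.
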
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index d344055..28e9470 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -684,6 +684,11 @@
     return opcode >= static_cast<int>(kMirOpFirst);
   }
 
+  // Is this vreg in the "in" (argument) set?
+  bool IsInVReg(int vreg) {
+    return (vreg >= cu_->num_regs);
+  }
+
   void DumpCheckStats();
   MIR* FindMoveResult(BasicBlock* bb, MIR* mir);
   int SRegToVReg(int ssa_reg) const;
@@ -917,6 +922,7 @@
   size_t num_non_special_compiler_temps_;
   size_t max_available_non_special_compiler_temps_;
   size_t max_available_special_compiler_temps_;
+  bool punt_to_interpreter_;                    // Difficult or not worthwhile - just interpret.
 
   friend class LocalValueNumberingTest;
 };
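
For reference, the vreg numbering IsInVReg() relies on, sketched for a method
with registers_size R and ins_size I (the Dalvik convention places the
arguments at the top of the register range):

    // v0 ............ v(R-I-1)   locals           IsInVReg() == false
    // v(R-I) ........ v(R-1)     incoming args    IsInVReg() == true
    //
    // cu_->num_regs holds R - I, so "vreg >= cu_->num_regs" is exactly the
    // "is this an argument register?" test.
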
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index c434cdd..4352c4a 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -422,6 +422,11 @@
     return kInstructionFlags[opcode];
   }
 
+  // Returns the verify flags for the given opcode.
+  static int VerifyFlagsOf(Code opcode) {
+    return kInstructionVerifyFlags[opcode];
+  }
+
   // Returns true if this instruction is a branch.
   bool IsBranch() const {
     return (kInstructionFlags[Opcode()] & kBranch) != 0;
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 7cbeb29..5339b5e 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -756,21 +756,25 @@
   RememberForGcArgumentVisitor visitor(sp, invoke_type == kStatic, shorty, shorty_len, &soa);
   visitor.VisitArguments();
   thread->EndAssertNoThreadSuspension(old_cause);
+  bool virtual_or_interface = invoke_type == kVirtual || invoke_type == kInterface;
   // Resolve method filling in dex cache.
   if (called->IsRuntimeMethod()) {
+    SirtRef<mirror::Object> sirt_receiver(soa.Self(), virtual_or_interface ? receiver : nullptr);
     called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
+    receiver = sirt_receiver.get();
   }
   const void* code = NULL;
   if (LIKELY(!thread->IsExceptionPending())) {
     // Incompatible class change should have been handled in resolve method.
     CHECK(!called->CheckIncompatibleClassChange(invoke_type));
-    // Refine called method based on receiver.
-    if (invoke_type == kVirtual) {
-      called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
-    } else if (invoke_type == kInterface) {
-      called = receiver->GetClass()->FindVirtualMethodForInterface(called);
-    }
-    if ((invoke_type == kVirtual) || (invoke_type == kInterface)) {
+    if (virtual_or_interface) {
+      // Refine called method based on receiver.
+      CHECK(receiver != nullptr) << invoke_type;
+      if (invoke_type == kVirtual) {
+        called = receiver->GetClass()->FindVirtualMethodForVirtual(called);
+      } else {
+        called = receiver->GetClass()->FindVirtualMethodForInterface(called);
+      }
       // We came here because of sharpening. Ensure the dex cache is up-to-date on the method index
       // of the sharpened method.
       if (called->GetDexCacheResolvedMethods() == caller->GetDexCacheResolvedMethods()) {
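
The underlying rule: ResolveMethod() can allocate, an allocation can trigger
a moving GC, and a moving GC invalidates raw object pointers such as
receiver. The pattern in isolation (a sketch; SirtRef is this era's rooted
stack handle, the precursor of Handle/StackHandleScope):

    // Root the receiver across a call that may trigger a moving GC.
    SirtRef<mirror::Object> sirt_receiver(soa.Self(), receiver);
    called = linker->ResolveMethod(dex_method_idx, caller, invoke_type);
    receiver = sirt_receiver.get();  // Re-read: the object may have moved.
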
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index e089ef2..89ded0b 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -256,7 +256,7 @@
   // Zygote resulting in it being prematurely freed.
   // We can only do this for primitive objects since large objects will not be within the card table
   // range. This also means that we rely on SetClass not dirtying the object's card.
-  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
+  return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
 }
 
 template <bool kGrow>
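
With the constant replaced by a field, the have_zygote_space_ test becomes
redundant: until the threshold is lowered (see the heap.cc hunks below),
large_object_threshold_ is SIZE_MAX and the comparison can never pass.
Sketch of the resulting predicate, with an assumed enclosing signature:

    // No allocation reaches the large object space while the threshold
    // still holds its "disabled" value of std::numeric_limits<size_t>::max().
    inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
      return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
    }
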
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8d8cdd6..2e6d2c2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -98,6 +98,7 @@
       long_gc_log_threshold_(long_gc_log_threshold),
       ignore_max_footprint_(ignore_max_footprint),
       have_zygote_space_(false),
+      large_object_threshold_(std::numeric_limits<size_t>::max()),  // Starts out disabled.
       soft_reference_queue_(this),
       weak_reference_queue_(this),
       finalizer_reference_queue_(this),
@@ -159,11 +160,16 @@
   }
   // If we aren't the zygote, switch to the default non zygote allocator. This may update the
   // entrypoints.
-  if (!Runtime::Current()->IsZygote() || !kMovingCollector) {
+  if (!Runtime::Current()->IsZygote()) {
     ChangeCollector(post_zygote_collector_type_);
+    large_object_threshold_ = kDefaultLargeObjectThreshold;
   } else {
-    // We are the zygote, use bump pointer allocation + semi space collector.
-    ChangeCollector(kCollectorTypeSS);
+    if (kMovingCollector) {
+      // We are the zygote, use bump pointer allocation + semi space collector.
+      ChangeCollector(kCollectorTypeSS);
+    } else {
+      ChangeCollector(post_zygote_collector_type_);
+    }
   }
 
   live_bitmap_.reset(new accounting::HeapBitmap(this));
@@ -1485,15 +1491,13 @@
   main_space_->SetFootprintLimit(main_space_->Capacity());
   AddSpace(main_space_);
   have_zygote_space_ = true;
+  // Enable large object space allocations.
+  large_object_threshold_ = kDefaultLargeObjectThreshold;
   // Create the zygote space mod union table.
   accounting::ModUnionTable* mod_union_table =
       new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
   AddModUnionTable(mod_union_table);
-  // Reset the cumulative loggers since we now have a few additional timing phases.
-  for (const auto& collector : garbage_collectors_) {
-    collector->ResetCumulativeStatistics();
-  }
   // Can't use RosAlloc for non moving space due to thread local buffers.
   // TODO: Non limited space for non-movable objects?
   MemMap* mem_map = post_zygote_non_moving_space_mem_map_.release();
@@ -2049,7 +2053,8 @@
       TimingLogger::ScopedSplit split("AllocSpaceClearCards", &timings);
       // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards
       // were dirty before the GC started.
-      // TODO: Don't need to use atomic.
+      // TODO: Need to use an atomic for the interleaving aged (cleaning thread) ->
+      // dirty (other thread) -> clean (cleaning thread), which would lose the dirty mark.
      // The race means we end up with either an aged card or an unaged card. Since we have the
      // checkpoint roots and then scan / update mod union tables afterwards, we will always scan
      // either card. If we end up with the non-aged card, we scan it in the pause.
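
Taken together, the heap.cc hunks give large_object_threshold_ three states:

    // Lifecycle of large_object_threshold_ after this change:
    //   constructor                    -> std::numeric_limits<size_t>::max()
    //                                     (large object space disabled)
    //   non-zygote runtime, at init    -> kDefaultLargeObjectThreshold
    //   zygote, once the zygote space
    //   is created (second hunk above) -> kDefaultLargeObjectThreshold
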
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 5d3232f..2f227d0 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -119,7 +119,7 @@
   // If true, measure the total allocation time.
   static constexpr bool kMeasureAllocationTime = false;
   // Primitive arrays larger than this size are put in the large object space.
-  static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;
+  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;
 
   static constexpr size_t kDefaultInitialSize = 2 * MB;
   static constexpr size_t kDefaultMaximumSize = 32 * MB;
@@ -743,6 +743,9 @@
   // If we have a zygote space.
   bool have_zygote_space_;
 
+  // Minimum size at which an allocation is placed in the large object space.
+  size_t large_object_threshold_;
+
   // Guards access to the state of GC, associated conditional variable is used to signal when a GC
   // completes.
   Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
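
For scale, on a typical 4 KiB-page target the renamed default works out to
12 KiB (kPageSize is platform-defined; 4096 is an assumption here):

    static constexpr size_t kPageSize = 4096;                              // assumed
    static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;  // 12288 bytes
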
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 8158bc5..1d37775 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -141,6 +141,7 @@
                                                               allocator_type, visitor));
   }
   if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
+    array_class = result->GetClass();  // In case the array class moved.
     CHECK_EQ(array_class->GetComponentSize(), component_size);
     if (!fill_usable) {
       CHECK_EQ(result->SizeOf(), size);
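
The one-line fix above encodes the same moving-GC rule as the trampoline
change: a raw mirror::Class* captured before an allocation may be stale
afterwards. Sketched against the debug check:

    // 'array_class' was read before the allocation and may now point at the
    // old (pre-move) copy; 'result' is returned after any GC, so every pointer
    // reached through it, including result->GetClass(), is current.
    array_class = result->GetClass();
    CHECK_EQ(array_class->GetComponentSize(), component_size);
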