Merge "Revert "[optimizing] Enable x86 long support.""
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc
index 18d8c7a..130eed2 100644
--- a/cmdline/cmdline_parser_test.cc
+++ b/cmdline/cmdline_parser_test.cc
@@ -418,8 +418,8 @@
   * Test successes
   */
   {
-    EXPECT_SINGLE_PARSE_VALUE(true, "-Xjit", M::UseJIT);
-    EXPECT_SINGLE_PARSE_VALUE(false, "-Xnojit", M::UseJIT);
+    EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJIT);
+    EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJIT);
   }
   {
     EXPECT_SINGLE_PARSE_VALUE(MemoryKiB(16 * KB), "-Xjitcodecachesize:16K", M::JITCodeCacheCapacity);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 8f97d1e..dbe4848 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -2202,6 +2202,7 @@
       StoreFinalValue(rl_dest, rl_result);
     } else {
       // Do the addition directly to memory.
+      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
       OpMemReg(kOpAdd, rl_result, temp.GetReg());
     }
   }
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a52a83a..df2b520 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -358,6 +358,7 @@
       image_(image),
       image_classes_(image_classes),
       classes_to_compile_(compiled_classes),
+      had_hard_verifier_failure_(false),
       thread_count_(thread_count),
       stats_(new AOTCompilationStats),
       dedupe_enabled_(true),
@@ -616,6 +617,11 @@
   Verify(class_loader, dex_files, thread_pool, timings);
   VLOG(compiler) << "Verify: " << GetMemoryUsageString(false);
 
+  if (had_hard_verifier_failure_ && GetCompilerOptions().AbortOnHardVerifierFailure()) {
+    LOG(FATAL) << "Had a hard failure verifying all classes, and was asked to abort in such "
+               << "situations. Please check the log.";
+  }
+
   InitializeClasses(class_loader, dex_files, thread_pool, timings);
   VLOG(compiler) << "InitializeClasses: " << GetMemoryUsageString(false);
 
@@ -1839,6 +1845,7 @@
                                                   verifier::MethodVerifier::kHardFailure) {
       LOG(ERROR) << "Verification failed on class " << PrettyDescriptor(descriptor)
                  << " because: " << error_msg;
+      manager->GetCompiler()->SetHadHardVerifierFailure();
     }
   } else if (!SkipClass(jclass_loader, dex_file, klass.Get())) {
     CHECK(klass->IsResolved()) << PrettyClass(klass.Get());
@@ -1848,6 +1855,7 @@
       // ClassLinker::VerifyClass throws, which isn't useful in the compiler.
       CHECK(soa.Self()->IsExceptionPending());
       soa.Self()->ClearException();
+      manager->GetCompiler()->SetHadHardVerifierFailure();
     }
 
     CHECK(klass->IsCompileTimeVerified() || klass->IsErroneous())
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 24b6f17..f949667 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -437,6 +437,10 @@
   // Get memory usage during compilation.
   std::string GetMemoryUsageString(bool extended) const;
 
+  void SetHadHardVerifierFailure() {
+    had_hard_verifier_failure_ = true;
+  }
+
  private:
   // These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
   // The only external contract is that unresolved method has flags 0 and resolved non-0.
@@ -575,6 +579,8 @@
   // included in the image.
   std::unique_ptr<std::set<std::string>> classes_to_compile_;
 
+  bool had_hard_verifier_failure_;
+
   size_t thread_count_;
 
   class AOTCompilationStats;
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 067e1bd..e436f52 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -38,6 +38,7 @@
       compile_pic_(false),
       verbose_methods_(nullptr),
       pass_manager_options_(new PassManagerOptions),
+      abort_on_hard_verifier_failure_(false),
       init_failure_output_(nullptr) {
 }
 
@@ -58,7 +59,8 @@
                                  bool compile_pic,
                                  const std::vector<std::string>* verbose_methods,
                                  PassManagerOptions* pass_manager_options,
-                                 std::ostream* init_failure_output
+                                 std::ostream* init_failure_output,
+                                 bool abort_on_hard_verifier_failure
                                  ) :  // NOLINT(whitespace/parens)
     compiler_filter_(compiler_filter),
     huge_method_threshold_(huge_method_threshold),
@@ -77,6 +79,7 @@
     compile_pic_(compile_pic),
     verbose_methods_(verbose_methods),
     pass_manager_options_(pass_manager_options),
+    abort_on_hard_verifier_failure_(abort_on_hard_verifier_failure),
     init_failure_output_(init_failure_output) {
 }
 
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index fecc600..5042c75 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -70,7 +70,8 @@
                   bool compile_pic,
                   const std::vector<std::string>* verbose_methods,
                   PassManagerOptions* pass_manager_options,
-                  std::ostream* init_failure_output);
+                  std::ostream* init_failure_output,
+                  bool abort_on_hard_verifier_failure);
 
   CompilerFilter GetCompilerFilter() const {
     return compiler_filter_;
@@ -183,6 +184,10 @@
     return pass_manager_options_.get();
   }
 
+  bool AbortOnHardVerifierFailure() const {
+    return abort_on_hard_verifier_failure_;
+  }
+
  private:
   CompilerFilter compiler_filter_;
   const size_t huge_method_threshold_;
@@ -206,6 +211,10 @@
 
   std::unique_ptr<PassManagerOptions> pass_manager_options_;
 
+  // Abort compilation with an error if we find a class that fails verification with a hard
+  // failure.
+  const bool abort_on_hard_verifier_failure_;
+
   // Log class initialization failures to this stream if not null.
   std::ostream* const init_failure_output_;
 
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index ff98d4a..04efa21 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -80,7 +80,8 @@
       false,  // pic
       nullptr,
       pass_manager_options,
-      nullptr));
+      nullptr,
+      false));
   const InstructionSet instruction_set = kRuntimeISA;
   instruction_set_features_.reset(InstructionSetFeatures::FromCppDefines());
   cumulative_logger_.reset(new CumulativeLogger("jit times"));
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 4b8addd..07d88de 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -78,7 +78,7 @@
  public:
   explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowNullPointer)));
     codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -93,7 +93,7 @@
  public:
   explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pThrowDivZero)));
     codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
@@ -108,7 +108,7 @@
  public:
   explicit DivRemMinusOneSlowPathX86(Register reg, bool is_div) : reg_(reg), is_div_(is_div) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     if (is_div_) {
       __ negl(reg_);
@@ -133,7 +133,7 @@
         index_location_(index_location),
         length_location_(length_location) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
@@ -161,7 +161,7 @@
   explicit SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
       : instruction_(instruction), successor_(successor) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
     codegen->SaveLiveRegisters(instruction_->GetLocations());
@@ -192,7 +192,7 @@
  public:
   explicit LoadStringSlowPathX86(HLoadString* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -227,7 +227,7 @@
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = at_->GetLocations();
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
     __ Bind(GetEntryLabel());
@@ -280,7 +280,7 @@
         object_class_(object_class),
         dex_pc_(dex_pc) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e7e6fff..07ba95d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -68,7 +68,7 @@
  public:
   explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowNullPointer), true));
@@ -84,7 +84,7 @@
  public:
   explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     __ gs()->call(
         Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pThrowDivZero), true));
@@ -101,7 +101,7 @@
   explicit DivRemMinusOneSlowPathX86_64(Register reg, Primitive::Type type, bool is_div)
       : cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     if (type_ == Primitive::kPrimInt) {
       if (is_div_) {
@@ -133,7 +133,7 @@
   explicit SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
       : instruction_(instruction), successor_(successor) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
     codegen->SaveLiveRegisters(instruction_->GetLocations());
@@ -169,7 +169,7 @@
         index_location_(index_location),
         length_location_(length_location) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
     // We're moving two locations to locations that could overlap, so we need a parallel
     // move resolver.
@@ -202,7 +202,7 @@
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = at_->GetLocations();
     CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
     __ Bind(GetEntryLabel());
@@ -249,7 +249,7 @@
  public:
   explicit LoadStringSlowPathX86_64(HLoadString* instruction) : instruction_(instruction) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
 
@@ -286,7 +286,7 @@
         object_class_(object_class),
         dex_pc_(dex_pc) {}
 
-  virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+  void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
     DCHECK(instruction_->IsCheckCast()
            || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b7dd756..8b56166 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -669,14 +669,14 @@
 #undef FORWARD_DECLARATION
 
 #define DECLARE_INSTRUCTION(type)                                       \
-  virtual InstructionKind GetKind() const { return k##type; }           \
-  virtual const char* DebugName() const { return #type; }               \
-  virtual const H##type* As##type() const OVERRIDE { return this; }     \
-  virtual H##type* As##type() OVERRIDE { return this; }                 \
-  virtual bool InstructionTypeEquals(HInstruction* other) const {       \
+  InstructionKind GetKind() const OVERRIDE { return k##type; }          \
+  const char* DebugName() const OVERRIDE { return #type; }              \
+  const H##type* As##type() const OVERRIDE { return this; }             \
+  H##type* As##type() OVERRIDE { return this; }                         \
+  bool InstructionTypeEquals(HInstruction* other) const OVERRIDE {      \
     return other->Is##type();                                           \
   }                                                                     \
-  virtual void Accept(HGraphVisitor* visitor)
+  void Accept(HGraphVisitor* visitor) OVERRIDE
 
 template <typename T> class HUseList;
 
@@ -1351,7 +1351,7 @@
       : HInstruction(side_effects), inputs_() {}
   virtual ~HTemplateInstruction() {}
 
-  virtual size_t InputCount() const { return N; }
+  size_t InputCount() const OVERRIDE { return N; }
 
  protected:
   const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE { return inputs_[i]; }
@@ -1373,7 +1373,7 @@
       : HTemplateInstruction<N>(side_effects), type_(type) {}
   virtual ~HExpression() {}
 
-  virtual Primitive::Type GetType() const { return type_; }
+  Primitive::Type GetType() const OVERRIDE { return type_; }
 
  protected:
   Primitive::Type type_;
@@ -1385,7 +1385,7 @@
  public:
   HReturnVoid() : HTemplateInstruction(SideEffects::None()) {}
 
-  virtual bool IsControlFlow() const { return true; }
+  bool IsControlFlow() const OVERRIDE { return true; }
 
   DECLARE_INSTRUCTION(ReturnVoid);
 
@@ -1401,7 +1401,7 @@
     SetRawInputAt(0, value);
   }
 
-  virtual bool IsControlFlow() const { return true; }
+  bool IsControlFlow() const OVERRIDE { return true; }
 
   DECLARE_INSTRUCTION(Return);
 
@@ -1416,7 +1416,7 @@
  public:
   HExit() : HTemplateInstruction(SideEffects::None()) {}
 
-  virtual bool IsControlFlow() const { return true; }
+  bool IsControlFlow() const OVERRIDE { return true; }
 
   DECLARE_INSTRUCTION(Exit);
 
@@ -1478,8 +1478,8 @@
   HInstruction* GetInput() const { return InputAt(0); }
   Primitive::Type GetResultType() const { return GetType(); }
 
-  virtual bool CanBeMoved() const { return true; }
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     UNUSED(other);
     return true;
   }
@@ -1546,8 +1546,8 @@
     }
   }
 
-  virtual bool CanBeMoved() const { return true; }
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     UNUSED(other);
     return true;
   }
@@ -1600,16 +1600,16 @@
 
   bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x == y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x == y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(Equal);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondEQ;
   }
 
@@ -1624,16 +1624,16 @@
 
   bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x != y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x != y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(NotEqual);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondNE;
   }
 
@@ -1646,16 +1646,16 @@
   HLessThan(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x < y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x < y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(LessThan);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondLT;
   }
 
@@ -1668,16 +1668,16 @@
   HLessThanOrEqual(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x <= y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x <= y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(LessThanOrEqual);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondLE;
   }
 
@@ -1690,16 +1690,16 @@
   HGreaterThan(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x > y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x > y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(GreaterThan);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondGT;
   }
 
@@ -1712,16 +1712,16 @@
   HGreaterThanOrEqual(HInstruction* first, HInstruction* second)
       : HCondition(first, second) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x >= y ? 1 : 0;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x >= y ? 1 : 0;
   }
 
   DECLARE_INSTRUCTION(GreaterThanOrEqual);
 
-  virtual IfCondition GetCondition() const {
+  IfCondition GetCondition() const OVERRIDE {
     return kCondGE;
   }
 
@@ -1830,7 +1830,7 @@
  public:
   explicit HConstant(Primitive::Type type) : HExpression(type, SideEffects::None()) {}
 
-  virtual bool CanBeMoved() const { return true; }
+  bool CanBeMoved() const OVERRIDE { return true; }
 
   DECLARE_INSTRUCTION(Constant);
 
@@ -1844,12 +1844,12 @@
 
   float GetValue() const { return value_; }
 
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     return bit_cast<float, int32_t>(other->AsFloatConstant()->value_) ==
         bit_cast<float, int32_t>(value_);
   }
 
-  virtual size_t ComputeHashCode() const { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
 
   DECLARE_INSTRUCTION(FloatConstant);
 
@@ -1865,12 +1865,12 @@
 
   double GetValue() const { return value_; }
 
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     return bit_cast<double, int64_t>(other->AsDoubleConstant()->value_) ==
         bit_cast<double, int64_t>(value_);
   }
 
-  virtual size_t ComputeHashCode() const { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
 
   DECLARE_INSTRUCTION(DoubleConstant);
 
@@ -1931,11 +1931,11 @@
 
   int64_t GetValue() const { return value_; }
 
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     return other->AsLongConstant()->value_ == value_;
   }
 
-  virtual size_t ComputeHashCode() const { return static_cast<size_t>(GetValue()); }
+  size_t ComputeHashCode() const OVERRIDE { return static_cast<size_t>(GetValue()); }
 
   DECLARE_INSTRUCTION(LongConstant);
 
@@ -1957,7 +1957,7 @@
 
 class HInvoke : public HInstruction {
  public:
-  virtual size_t InputCount() const { return inputs_.Size(); }
+  size_t InputCount() const OVERRIDE { return inputs_.Size(); }
 
   // Runtime needs to walk the stack, so Dex -> Dex calls need to
   // know their environment.
@@ -1967,7 +1967,7 @@
     SetRawInputAt(index, argument);
   }
 
-  virtual Primitive::Type GetType() const { return return_type_; }
+  Primitive::Type GetType() const OVERRIDE { return return_type_; }
 
   uint32_t GetDexPc() const { return dex_pc_; }
 
@@ -2135,8 +2135,8 @@
   explicit HNeg(Primitive::Type result_type, HInstruction* input)
       : HUnaryOperation(result_type, input) {}
 
-  virtual int32_t Evaluate(int32_t x) const OVERRIDE { return -x; }
-  virtual int64_t Evaluate(int64_t x) const OVERRIDE { return -x; }
+  int32_t Evaluate(int32_t x) const OVERRIDE { return -x; }
+  int64_t Evaluate(int64_t x) const OVERRIDE { return -x; }
 
   DECLARE_INSTRUCTION(Neg);
 
@@ -2182,12 +2182,12 @@
   HAdd(Primitive::Type result_type, HInstruction* left, HInstruction* right)
       : HBinaryOperation(result_type, left, right) {}
 
-  virtual bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x + y;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x + y;
   }
 
@@ -2202,10 +2202,10 @@
   HSub(Primitive::Type result_type, HInstruction* left, HInstruction* right)
       : HBinaryOperation(result_type, left, right) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     return x - y;
   }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     return x - y;
   }
 
@@ -2220,10 +2220,10 @@
   HMul(Primitive::Type result_type, HInstruction* left, HInstruction* right)
       : HBinaryOperation(result_type, left, right) {}
 
-  virtual bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const { return x * y; }
-  virtual int64_t Evaluate(int64_t x, int64_t y) const { return x * y; }
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE { return x * y; }
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE { return x * y; }
 
   DECLARE_INSTRUCTION(Mul);
 
@@ -2236,14 +2236,14 @@
   HDiv(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
       : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     // Our graph structure ensures we never have 0 for `y` during constant folding.
     DCHECK_NE(y, 0);
     // Special case -1 to avoid getting a SIGFPE on x86(_64).
     return (y == -1) ? -x : x / y;
   }
 
-  virtual int64_t Evaluate(int64_t x, int64_t y) const {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     DCHECK_NE(y, 0);
     // Special case -1 to avoid getting a SIGFPE on x86(_64).
     return (y == -1) ? -x : x / y;
@@ -2264,13 +2264,13 @@
   HRem(Primitive::Type result_type, HInstruction* left, HInstruction* right, uint32_t dex_pc)
       : HBinaryOperation(result_type, left, right), dex_pc_(dex_pc) {}
 
-  virtual int32_t Evaluate(int32_t x, int32_t y) const {
+  int32_t Evaluate(int32_t x, int32_t y) const OVERRIDE {
     DCHECK_NE(y, 0);
     // Special case -1 to avoid getting a SIGFPE on x86(_64).
     return (y == -1) ? 0 : x % y;
   }
 
-  virtual int64_t Evaluate(int64_t x, int64_t y) const {
+  int64_t Evaluate(int64_t x, int64_t y) const OVERRIDE {
     DCHECK_NE(y, 0);
     // Special case -1 to avoid getting a SIGFPE on x86(_64).
     return (y == -1) ? 0 : x % y;
@@ -2441,14 +2441,14 @@
   explicit HNot(Primitive::Type result_type, HInstruction* input)
       : HUnaryOperation(result_type, input) {}
 
-  virtual bool CanBeMoved() const { return true; }
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     UNUSED(other);
     return true;
   }
 
-  virtual int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
-  virtual int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
+  int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
+  int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
 
   DECLARE_INSTRUCTION(Not);
 
@@ -2772,15 +2772,15 @@
     SetRawInputAt(1, length);
   }
 
-  virtual bool CanBeMoved() const { return true; }
-  virtual bool InstructionDataEquals(HInstruction* other) const {
+  bool CanBeMoved() const OVERRIDE { return true; }
+  bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
     UNUSED(other);
     return true;
   }
 
-  virtual bool NeedsEnvironment() const { return true; }
+  bool NeedsEnvironment() const OVERRIDE { return true; }
 
-  virtual bool CanThrow() const { return true; }
+  bool CanThrow() const OVERRIDE { return true; }
 
   uint32_t GetDexPc() const { return dex_pc_; }
 
@@ -2824,7 +2824,7 @@
   explicit HSuspendCheck(uint32_t dex_pc)
       : HTemplateInstruction(SideEffects::None()), dex_pc_(dex_pc) {}
 
-  virtual bool NeedsEnvironment() const {
+  bool NeedsEnvironment() const OVERRIDE {
     return true;
   }
 
@@ -3352,7 +3352,7 @@
 
   // Visit functions that delegate to the super class.
 #define DECLARE_VISIT_INSTRUCTION(name, super)                                        \
-  virtual void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); }
+  void Visit##name(H##name* instr) OVERRIDE { Visit##super(instr); }
 
   FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
 
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 44a3da2..817a44b 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -36,7 +36,7 @@
     }
   }
 
-  virtual void EmitMove(size_t index) {
+  void EmitMove(size_t index) OVERRIDE {
     MoveOperands* move = moves_.Get(index);
     if (!message_.str().empty()) {
       message_ << " ";
@@ -48,7 +48,7 @@
     message_ << ")";
   }
 
-  virtual void EmitSwap(size_t index) {
+  void EmitSwap(size_t index) OVERRIDE {
     MoveOperands* move = moves_.Get(index);
     if (!message_.str().empty()) {
       message_ << " ";
@@ -60,8 +60,8 @@
     message_ << ")";
   }
 
-  virtual void SpillScratch(int reg ATTRIBUTE_UNUSED) {}
-  virtual void RestoreScratch(int reg ATTRIBUTE_UNUSED) {}
+  void SpillScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
+  void RestoreScratch(int reg ATTRIBUTE_UNUSED) OVERRIDE {}
 
   std::string GetMessage() const {
     return  message_.str();
diff --git a/compiler/optimizing/prepare_for_register_allocation.h b/compiler/optimizing/prepare_for_register_allocation.h
index 0f697fb..c28507c 100644
--- a/compiler/optimizing/prepare_for_register_allocation.h
+++ b/compiler/optimizing/prepare_for_register_allocation.h
@@ -33,12 +33,12 @@
   void Run();
 
  private:
-  virtual void VisitNullCheck(HNullCheck* check) OVERRIDE;
-  virtual void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
-  virtual void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
-  virtual void VisitBoundType(HBoundType* bound_type) OVERRIDE;
-  virtual void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
-  virtual void VisitCondition(HCondition* condition) OVERRIDE;
+  void VisitNullCheck(HNullCheck* check) OVERRIDE;
+  void VisitDivZeroCheck(HDivZeroCheck* check) OVERRIDE;
+  void VisitBoundsCheck(HBoundsCheck* check) OVERRIDE;
+  void VisitBoundType(HBoundType* bound_type) OVERRIDE;
+  void VisitClinitCheck(HClinitCheck* check) OVERRIDE;
+  void VisitCondition(HCondition* condition) OVERRIDE;
 
   DISALLOW_COPY_AND_ASSIGN(PrepareForRegisterAllocation);
 };
diff --git a/compiler/optimizing/pretty_printer.h b/compiler/optimizing/pretty_printer.h
index d2a21c8..934514e 100644
--- a/compiler/optimizing/pretty_printer.h
+++ b/compiler/optimizing/pretty_printer.h
@@ -32,7 +32,7 @@
     PrintString(": ");
   }
 
-  virtual void VisitInstruction(HInstruction* instruction) {
+  void VisitInstruction(HInstruction* instruction) OVERRIDE {
     PrintPreInstruction(instruction);
     PrintString(instruction->DebugName());
     PrintPostInstruction(instruction);
@@ -68,7 +68,7 @@
     PrintNewLine();
   }
 
-  virtual void VisitBasicBlock(HBasicBlock* block) {
+  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
     PrintString("BasicBlock ");
     PrintInt(block->GetBlockId());
     const GrowableArray<HBasicBlock*>& predecessors = block->GetPredecessors();
@@ -106,15 +106,15 @@
   explicit StringPrettyPrinter(HGraph* graph)
       : HPrettyPrinter(graph), str_(""), current_block_(nullptr) { }
 
-  virtual void PrintInt(int value) {
+  void PrintInt(int value) OVERRIDE {
     str_ += StringPrintf("%d", value);
   }
 
-  virtual void PrintString(const char* value) {
+  void PrintString(const char* value) OVERRIDE {
     str_ += value;
   }
 
-  virtual void PrintNewLine() {
+  void PrintNewLine() OVERRIDE {
     str_ += '\n';
   }
 
@@ -122,12 +122,12 @@
 
   std::string str() const { return str_; }
 
-  virtual void VisitBasicBlock(HBasicBlock* block) {
+  void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
     current_block_ = block;
     HPrettyPrinter::VisitBasicBlock(block);
   }
 
-  virtual void VisitGoto(HGoto* gota) {
+  void VisitGoto(HGoto* gota) OVERRIDE {
     PrintString("  ");
     PrintInt(gota->GetId());
     PrintString(": Goto ");
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 54e62a5..748ab22 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -1381,6 +1381,7 @@
                         : Location::StackSlot(interval->GetParent()->GetSpillSlot()));
   }
   UsePosition* use = current->GetFirstUse();
+  size_t safepoint_index = safepoints_.Size();
 
   // Walk over all siblings, updating locations of use positions, and
   // connecting them when they are adjacent.
@@ -1422,19 +1423,27 @@
     }
 
     // At each safepoint, we record stack and register information.
-    for (size_t i = 0, e = safepoints_.Size(); i < e; ++i) {
-      HInstruction* safepoint = safepoints_.Get(i);
+    // We iterate backwards to test safepoints in ascending order of positions,
+    // which is what LiveInterval::Covers is optimized for.
+    for (; safepoint_index > 0; --safepoint_index) {
+      HInstruction* safepoint = safepoints_.Get(safepoint_index - 1);
       size_t position = safepoint->GetLifetimePosition();
-      LocationSummary* locations = safepoint->GetLocations();
-      if (!current->Covers(position)) {
+
+      // Test that safepoints are ordered in the optimal way.
+      DCHECK(safepoint_index == safepoints_.Size()
+             || safepoints_.Get(safepoint_index)->GetLifetimePosition() <= position);
+
+      if (current->IsDeadAt(position)) {
+        break;
+      } else if (!current->Covers(position)) {
         continue;
-      }
-      if (interval->GetStart() == position) {
+      } else if (interval->GetStart() == position) {
         // The safepoint is for this instruction, so the location of the instruction
         // does not need to be saved.
         continue;
       }
 
+      LocationSummary* locations = safepoint->GetLocations();
       if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
         locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
       }
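
The rewritten safepoint loop above walks safepoints_ from the back so lifetime positions are visited in ascending order (the access pattern LiveInterval::Covers is optimized for) and can stop outright once the interval is dead. A self-contained sketch of the same pattern with plain integers (the positions and live range below are assumed for illustration, not ART's data structures):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Positions stored the way the allocator keeps safepoints_: highest position first.
      std::vector<size_t> safepoint_positions = {40, 28, 14, 6};
      const size_t start = 10;  // assumed interval start (inclusive)
      const size_t end = 30;    // assumed interval end (exclusive)

      // Iterate backwards so positions come out ascending.
      for (size_t index = safepoint_positions.size(); index > 0; --index) {
        const size_t position = safepoint_positions[index - 1];
        if (position >= end) {
          break;      // like IsDeadAt(): every later safepoint is past the interval too
        } else if (position < start) {
          continue;   // like !Covers(): not live here, but a later safepoint may be covered
        }
        std::printf("record stack/register info at position %zu\n", position);
      }
      return 0;
    }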
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 7fc1ec6..a05b38c9 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -32,15 +32,15 @@
  public:
   explicit SsaPrettyPrinter(HGraph* graph) : HPrettyPrinter(graph), str_("") {}
 
-  virtual void PrintInt(int value) {
+  void PrintInt(int value) OVERRIDE {
     str_ += StringPrintf("%d", value);
   }
 
-  virtual void PrintString(const char* value) {
+  void PrintString(const char* value) OVERRIDE {
     str_ += value;
   }
 
-  virtual void PrintNewLine() {
+  void PrintNewLine() OVERRIDE {
     str_ += '\n';
   }
 
@@ -48,7 +48,7 @@
 
   std::string str() const { return str_; }
 
-  virtual void VisitIntConstant(HIntConstant* constant) {
+  void VisitIntConstant(HIntConstant* constant) OVERRIDE {
     PrintPreInstruction(constant);
     str_ += constant->DebugName();
     str_ += " ";
diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc
index 744fb45..5b02510 100644
--- a/compiler/optimizing/stack_map_test.cc
+++ b/compiler/optimizing/stack_map_test.cc
@@ -31,13 +31,19 @@
   return true;
 }
 
+size_t ComputeDexRegisterMapSize(size_t number_of_dex_registers) {
+  return DexRegisterMap::kFixedSize
+      + number_of_dex_registers * DexRegisterMap::SingleEntrySize();
+}
+
 TEST(StackMapTest, Test1) {
   ArenaPool pool;
   ArenaAllocator arena(&pool);
   StackMapStream stream(&arena);
 
   ArenaBitVector sp_mask(&arena, 0, false);
-  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, 2, 0);
+  size_t number_of_dex_registers = 2;
+  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0);
   stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
   stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
 
@@ -56,17 +62,21 @@
   ASSERT_EQ(0u, stack_map.GetDexPc());
   ASSERT_EQ(64u, stack_map.GetNativePcOffset());
   ASSERT_EQ(0x3u, stack_map.GetRegisterMask());
-  ASSERT_FALSE(stack_map.HasInlineInfo());
 
   MemoryRegion stack_mask = stack_map.GetStackMask();
   ASSERT_TRUE(SameBits(stack_mask, sp_mask));
 
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
-  DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
+  DexRegisterMap dex_registers =
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+  ASSERT_EQ(16u, dex_registers.Size());
+  ASSERT_EQ(16u, ComputeDexRegisterMapSize(number_of_dex_registers));
   ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
   ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
   ASSERT_EQ(0, dex_registers.GetValue(0));
   ASSERT_EQ(-2, dex_registers.GetValue(1));
+
+  ASSERT_FALSE(stack_map.HasInlineInfo());
 }
 
 TEST(StackMapTest, Test2) {
@@ -77,7 +87,8 @@
   ArenaBitVector sp_mask1(&arena, 0, true);
   sp_mask1.SetBit(2);
   sp_mask1.SetBit(4);
-  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, 2, 2);
+  size_t number_of_dex_registers = 2;
+  stream.AddStackMapEntry(0, 64, 0x3, &sp_mask1, number_of_dex_registers, 2);
   stream.AddDexRegisterEntry(DexRegisterMap::kInStack, 0);
   stream.AddDexRegisterEntry(DexRegisterMap::kConstant, -2);
   stream.AddInlineInfoEntry(42);
@@ -86,8 +97,9 @@
   ArenaBitVector sp_mask2(&arena, 0, true);
   sp_mask2.SetBit(3);
   sp_mask1.SetBit(8);
-  stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, 1, 0);
-  stream.AddDexRegisterEntry(DexRegisterMap::kInRegister, 0);
+  stream.AddStackMapEntry(1, 128, 0xFF, &sp_mask2, number_of_dex_registers, 0);
+  stream.AddDexRegisterEntry(DexRegisterMap::kInRegister, 18);
+  stream.AddDexRegisterEntry(DexRegisterMap::kInFpuRegister, 3);
 
   size_t size = stream.ComputeNeededSize();
   void* memory = arena.Alloc(size, kArenaAllocMisc);
@@ -98,6 +110,7 @@
   ASSERT_EQ(1u, code_info.GetStackMaskSize());
   ASSERT_EQ(2u, code_info.GetNumberOfStackMaps());
 
+  // First stack map.
   StackMap stack_map = code_info.GetStackMapAt(0);
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0)));
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64)));
@@ -109,17 +122,22 @@
   ASSERT_TRUE(SameBits(stack_mask, sp_mask1));
 
   ASSERT_TRUE(stack_map.HasDexRegisterMap());
-  DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2);
+  DexRegisterMap dex_registers =
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+  ASSERT_EQ(16u, dex_registers.Size());
+  ASSERT_EQ(16u, ComputeDexRegisterMapSize(number_of_dex_registers));
   ASSERT_EQ(DexRegisterMap::kInStack, dex_registers.GetLocationKind(0));
   ASSERT_EQ(DexRegisterMap::kConstant, dex_registers.GetLocationKind(1));
   ASSERT_EQ(0, dex_registers.GetValue(0));
   ASSERT_EQ(-2, dex_registers.GetValue(1));
 
+  ASSERT_TRUE(stack_map.HasInlineInfo());
   InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map);
   ASSERT_EQ(2u, inline_info.GetDepth());
   ASSERT_EQ(42u, inline_info.GetMethodReferenceIndexAtDepth(0));
   ASSERT_EQ(82u, inline_info.GetMethodReferenceIndexAtDepth(1));
 
+  // Second stack map.
   stack_map = code_info.GetStackMapAt(1);
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u)));
   ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u)));
@@ -130,6 +148,16 @@
   stack_mask = stack_map.GetStackMask();
   ASSERT_TRUE(SameBits(stack_mask, sp_mask2));
 
+  ASSERT_TRUE(stack_map.HasDexRegisterMap());
+  dex_registers =
+      code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers);
+  ASSERT_EQ(16u, dex_registers.Size());
+  ASSERT_EQ(16u, ComputeDexRegisterMapSize(number_of_dex_registers));
+  ASSERT_EQ(DexRegisterMap::kInRegister, dex_registers.GetLocationKind(0));
+  ASSERT_EQ(DexRegisterMap::kInFpuRegister, dex_registers.GetLocationKind(1));
+  ASSERT_EQ(18, dex_registers.GetValue(0));
+  ASSERT_EQ(3, dex_registers.GetValue(1));
+
   ASSERT_FALSE(stack_map.HasInlineInfo());
 }
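
The 16u assertions follow from the ComputeDexRegisterMapSize() helper introduced at the top of this test: DexRegisterMap::kFixedSize is 0 (see the runtime/stack_map.h hunk below) and SingleEntrySize() is sizeof(LocationKind) + sizeof(int32_t), so two dex registers take 0 + 2 * (4 + 4) = 16 bytes, assuming LocationKind is a 4-byte enum, which is what the expected value implies.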
 
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index c080453..8572f4d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -492,6 +492,7 @@
     bool include_debug_symbols = kIsDebugBuild;
     bool watch_dog_enabled = true;
     bool generate_gdb_information = kIsDebugBuild;
+    bool abort_on_hard_verifier_error = false;
 
     PassManagerOptions pass_manager_options;
 
@@ -732,6 +733,8 @@
         if (swap_fd_ < 0) {
           Usage("--swap-fd passed a negative value %d", swap_fd_);
         }
+      } else if (option == "--abort-on-hard-verifier-error") {
+        abort_on_hard_verifier_error = true;
       } else {
         Usage("Unknown argument %s", option.data());
       }
@@ -941,7 +944,8 @@
                                                     nullptr :
                                                     &verbose_methods_,
                                                 new PassManagerOptions(pass_manager_options),
-                                                init_failure_output_.get()));
+                                                init_failure_output_.get(),
+                                                abort_on_hard_verifier_error));
 
     // Done with usage checks, enable watchdog if requested
     if (watch_dog_enabled) {
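
Taken together, the new dex2oat flag, the CompilerOptions field, and the driver bookkeeping form a single path: a hard verification failure marks the driver, and after Verify() the driver aborts when --abort-on-hard-verifier-error was passed. A minimal standalone sketch of that control flow (the Fake* types below are simplified stand-ins, not the real ART classes):

    #include <cstdio>
    #include <cstdlib>

    // Stand-in for CompilerOptions: only the flag this change introduces.
    struct FakeCompilerOptions {
      bool abort_on_hard_verifier_failure = false;
    };

    // Stand-in for CompilerDriver: records hard failures during verification and
    // checks the option once all classes have been verified.
    class FakeCompilerDriver {
     public:
      explicit FakeCompilerDriver(const FakeCompilerOptions& options) : options_(options) {}

      void VerifyClass(bool hard_failure) {
        if (hard_failure) {
          had_hard_verifier_failure_ = true;  // mirrors SetHadHardVerifierFailure()
        }
      }

      void CompileAll() {
        VerifyClass(/*hard_failure=*/ true);  // pretend one class failed verification hard
        if (had_hard_verifier_failure_ && options_.abort_on_hard_verifier_failure) {
          std::fprintf(stderr, "Had a hard failure verifying all classes, aborting.\n");
          std::abort();                       // mirrors the LOG(FATAL) after Verify()
        }
      }

     private:
      const FakeCompilerOptions& options_;
      bool had_hard_verifier_failure_ = false;
    };

    int main() {
      FakeCompilerOptions options;
      options.abort_on_hard_verifier_failure = true;  // e.g. --abort-on-hard-verifier-error given
      FakeCompilerDriver driver(options);
      driver.CompileAll();  // aborts because a hard verifier failure was recorded
      return 0;
    }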
diff --git a/disassembler/disassembler_arm64.cc b/disassembler/disassembler_arm64.cc
index 4ff44b4..d195efc 100644
--- a/disassembler/disassembler_arm64.cc
+++ b/disassembler/disassembler_arm64.cc
@@ -43,18 +43,15 @@
     const vixl::Instruction* instr,
     const vixl::CPURegister& reg) {
   USE(instr);
-  if (reg.IsRegister()) {
-    switch (reg.code()) {
-      case IP0: AppendToOutput(reg.Is64Bits() ? "ip0" : "wip0"); return;
-      case IP1: AppendToOutput(reg.Is64Bits() ? "ip1" : "wip1"); return;
-      case TR:  AppendToOutput(reg.Is64Bits() ? "tr"  :  "w18"); return;
-      case ETR: AppendToOutput(reg.Is64Bits() ? "etr" :  "w21"); return;
-      case FP:  AppendToOutput(reg.Is64Bits() ? "fp"  :  "w29"); return;
-      case LR:  AppendToOutput(reg.Is64Bits() ? "lr"  :  "w30"); return;
-      default:
-        // Fall through.
-        break;
+  if (reg.IsRegister() && reg.Is64Bits()) {
+    if (reg.code() == TR) {
+      AppendToOutput("tr");
+      return;
+    } else if (reg.code() == LR) {
+      AppendToOutput("lr");
+      return;
     }
+    // Fall through.
   }
   // Print other register names as usual.
   Disassembler::AppendRegisterNameToOutput(instr, reg);
@@ -105,13 +102,7 @@
 size_t DisassemblerArm64::Dump(std::ostream& os, const uint8_t* begin) {
   const vixl::Instruction* instr = reinterpret_cast<const vixl::Instruction*>(begin);
   decoder.Decode(instr);
-  // TODO: Use FormatInstructionPointer() once VIXL provides the appropriate
-  // features.
-  // VIXL does not yet allow remapping addresses disassembled. Using
-  // FormatInstructionPointer() would show incoherences between the instruction
-  // location addresses and the target addresses disassembled by VIXL (eg. for
-  // branch instructions).
-  os << StringPrintf("%p", instr)
+  os << FormatInstructionPointer(begin)
      << StringPrintf(": %08x\t%s\n", instr->InstructionBits(), disasm.GetOutput());
   return vixl::kInstructionSize;
 }
diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h
index 57f11c8..3fb5c7f 100644
--- a/disassembler/disassembler_arm64.h
+++ b/disassembler/disassembler_arm64.h
@@ -30,8 +30,12 @@
 
 class CustomDisassembler FINAL : public vixl::Disassembler {
  public:
-  explicit CustomDisassembler(bool read_literals) :
-      vixl::Disassembler(), read_literals_(read_literals) {}
+  explicit CustomDisassembler(DisassemblerOptions* options) :
+      vixl::Disassembler(), read_literals_(options->can_read_literals_) {
+    if (!options->absolute_addresses_) {
+      MapCodeAddress(0, reinterpret_cast<const vixl::Instruction*>(options->base_address_));
+    }
+  }
 
   // Use register aliases in the disassembly.
   void AppendRegisterNameToOutput(const vixl::Instruction* instr,
@@ -55,11 +59,8 @@
 
 class DisassemblerArm64 FINAL : public Disassembler {
  public:
-  // TODO: Update this code once VIXL provides the ability to map code addresses
-  // to disassemble as a different address (the way FormatInstructionPointer()
-  // does).
   explicit DisassemblerArm64(DisassemblerOptions* options) :
-      Disassembler(options), disasm(options->can_read_literals_) {
+      Disassembler(options), disasm(options) {
     decoder.AppendVisitor(&disasm);
   }
 
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 057eed1..dd45eca 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -206,7 +206,7 @@
     }
     cc->is_marking_ = true;
     if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
-      CHECK(Runtime::Current()->IsCompiler());
+      CHECK(Runtime::Current()->IsAotCompiler());
       TimingLogger::ScopedTiming split2("(Paused)VisitTransactionRoots", cc->GetTimings());
       Runtime::Current()->VisitTransactionRoots(ConcurrentCopying::ProcessRootCallback, cc);
     }
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index b6fedd9..3d69796 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -408,7 +408,7 @@
 void JdwpState::Run() {
   Runtime* runtime = Runtime::Current();
   CHECK(runtime->AttachCurrentThread("JDWP", true, runtime->GetSystemThreadGroup(),
-                                     !runtime->IsCompiler()));
+                                     !runtime->IsAotCompiler()));
 
   VLOG(jdwp) << "JDWP: thread running";
 
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 9d87ed7..607569a 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -146,8 +146,9 @@
       .Define({"-XX:EnableHSpaceCompactForOOM", "-XX:DisableHSpaceCompactForOOM"})
           .WithValues({true, false})
           .IntoKey(M::EnableHSpaceCompactForOOM)
-      .Define({"-Xjit", "-Xnojit"})
-          .WithValues({true, false})
+      .Define("-Xusejit:_")
+          .WithType<bool>()
+          .WithValueMap({{"false", false}, {"true", true}})
           .IntoKey(M::UseJIT)
       .Define("-Xjitcodecachesize:_")
           .WithType<MemoryKiB>()
@@ -642,8 +643,7 @@
   UsageMessage(stream, "  -Xcompiler-option dex2oat-option\n");
   UsageMessage(stream, "  -Ximage-compiler-option dex2oat-option\n");
   UsageMessage(stream, "  -Xpatchoat:filename\n");
-  UsageMessage(stream, "  -Xjit\n");
-  UsageMessage(stream, "  -Xnojit\n");
+  UsageMessage(stream, "  -Xusejit:booleanvalue\n");
   UsageMessage(stream, "  -X[no]relocate\n");
   UsageMessage(stream, "  -X[no]dex2oat (Whether to invoke dex2oat on the application)\n");
   UsageMessage(stream, "  -X[no]image-dex2oat (Whether to create and use a boot image)\n");
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 6d9f20e..35a9e6f 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -846,7 +846,7 @@
     Dbg::ConfigureJdwp(runtime_options.GetOrDefault(Opt::JdwpOptions));
   }
 
-  if (!IsCompiler()) {
+  if (!IsAotCompiler()) {
     // If we are already the compiler at this point, we must be dex2oat. Don't create the jit in
     // this case.
     // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns
@@ -1551,14 +1551,14 @@
   if (!IsActiveTransaction()) {
     return false;
   } else {
-    DCHECK(IsCompiler());
+    DCHECK(IsAotCompiler());
     return preinitialization_transaction_->IsAborted();
   }
 }
 
 void Runtime::AbortTransactionAndThrowInternalError(Thread* self,
                                                     const std::string& abort_message) {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   // Throwing an exception may cause its class initialization. If we mark the transaction
   // aborted before that, we may warn with a false alarm. Throwing the exception before
@@ -1568,35 +1568,35 @@
 }
 
 void Runtime::ThrowInternalErrorForAbortedTransaction(Thread* self) {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->ThrowInternalError(self, true);
 }
 
 void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
                                       uint8_t value, bool is_volatile) const {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
 }
 
 void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
                                    int8_t value, bool is_volatile) const {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
 }
 
 void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
                                    uint16_t value, bool is_volatile) const {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
 }
 
 void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
                                     int16_t value, bool is_volatile) const {
-  DCHECK(IsCompiler());
+  DCHECK(IsAotCompiler());
   DCHECK(IsActiveTransaction());
   preinitialization_transaction_->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
 }
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index ec37699..6d99672 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -146,13 +146,14 @@
     return sizeof(LocationKind) + sizeof(int32_t);
   }
 
- private:
+  size_t Size() const {
+    return region_.size();
+  }
+
   static constexpr int kFixedSize = 0;
 
+ private:
   MemoryRegion region_;
-
-  friend class CodeInfo;
-  friend class StackMapStream;
 };
 
 /**
diff --git a/test/031-class-attributes/src/ClassAttrs.java b/test/031-class-attributes/src/ClassAttrs.java
index 977a05f..c2e41c5 100644
--- a/test/031-class-attributes/src/ClassAttrs.java
+++ b/test/031-class-attributes/src/ClassAttrs.java
@@ -330,8 +330,9 @@
         for (Type t : types) {
           typeStringList.add(t.toString());
         }
-        // Sort types alphabetically so they're always printed in the same order, whichever
-        // tool generated the DEX file of the test.
+        // Sort types alphabetically so they're always printed in the same order.
+        // For instance, Class.getClasses() does not guarantee any order for the
+        // returned Class[].
         Collections.sort(typeStringList);
 
         StringBuilder stb = new StringBuilder();