Fix some typos in ART.

Mostly missing spaces before the colon in ternary expressions, plus
comment fixes: "boolean" -> "Boolean", "divrem" -> "divmod",
"java.lang.Reference" -> "java.lang.ref.Reference", "x86" -> "x86-64",
"x0-<" -> "x0<-", and a few grammar corrections.

Test: m build-art-host
Test: m cpplint-art
Change-Id: Ifc6ce3d0d645c4a8dca72dd483fc03fc05077130
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index faf8b41..c03ffca 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -284,7 +284,7 @@
       verification_results_(verification_results),
       compiler_(Compiler::Create(this, compiler_kind)),
       compiler_kind_(compiler_kind),
-      instruction_set_(instruction_set == kArm ? kThumb2: instruction_set),
+      instruction_set_(instruction_set == kArm ? kThumb2 : instruction_set),
       instruction_set_features_(instruction_set_features),
       requires_constructor_barrier_lock_("constructor barrier lock"),
       compiled_classes_lock_("compiled classes lock"),
@@ -1251,7 +1251,7 @@
     }
   }
 
-  // java.lang.Reference visitor for VisitReferences.
+  // java.lang.ref.Reference visitor for VisitReferences.
   void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED,
                   ObjPtr<mirror::Reference> ref ATTRIBUTE_UNUSED) const {}
 
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 8ca0ffe..ba654f4 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -160,7 +160,7 @@
     while (HasNext()) {
       ManagedRegister in_reg = CurrentParamRegister();
       if (!in_reg.IsNoRegister()) {
-        int32_t size = IsParamALongOrDouble(itr_args_)? 8 : 4;
+        int32_t size = IsParamALongOrDouble(itr_args_) ? 8 : 4;
         int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
         ManagedRegisterSpill spill(in_reg, size, spill_offset);
         entry_spills_.push_back(spill);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 4c96867..6c2934f 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -3332,7 +3332,7 @@
         InvokeRuntimeCallingConvention calling_convention;
         locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
         locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+        // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but
         //       we only need the former.
         locations->SetOut(Location::RegisterLocation(R0));
       }
@@ -3459,7 +3459,7 @@
         InvokeRuntimeCallingConvention calling_convention;
         locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
         locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
-        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+        // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but
         //       we only need the latter.
         locations->SetOut(Location::RegisterLocation(R1));
       }
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 68d0b86..8bd18ee 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -4655,7 +4655,7 @@
 }
 
 void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
-  codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject: kQuickUnlockObject,
+  codegen_->InvokeRuntime(instruction->IsEnter() ? kQuickLockObject : kQuickUnlockObject,
                           instruction,
                           instruction->GetDexPc());
   if (instruction->IsEnter()) {
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 82338ff..64bcd62 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3336,7 +3336,7 @@
         InvokeRuntimeCallingConventionARMVIXL calling_convention;
         locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
         locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
-        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+        // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but
         //       we only need the former.
         locations->SetOut(LocationFrom(r0));
       }
@@ -3450,7 +3450,7 @@
         InvokeRuntimeCallingConventionARMVIXL calling_convention;
         locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
         locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
-        // Note: divrem will compute both the quotient and the remainder as the pair R0 and R1, but
+        // Note: divmod will compute both the quotient and the remainder as the pair R0 and R1, but
         //       we only need the latter.
         locations->SetOut(LocationFrom(r1));
       }
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 853c91f..5c561f5 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1778,7 +1778,7 @@
         cond = X86Condition(condition->GetCondition());
       }
     } else {
-      // Must be a boolean condition, which needs to be compared to 0.
+      // Must be a Boolean condition, which needs to be compared to 0.
       Register cond_reg = locations->InAt(2).AsRegister<Register>();
       __ testl(cond_reg, cond_reg);
     }
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 74c71cc..c4caf4b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1809,7 +1809,7 @@
         cond = X86_64IntegerCondition(condition->GetCondition());
       }
     } else {
-      // Must be a boolean condition, which needs to be compared to 0.
+      // Must be a Boolean condition, which needs to be compared to 0.
       CpuRegister cond_reg = locations->InAt(2).AsRegister<CpuRegister>();
       __ testl(cond_reg, cond_reg);
     }
@@ -4210,7 +4210,7 @@
 
 void CodeGeneratorX86_64::GenerateMemoryBarrier(MemBarrierKind kind) {
   /*
-   * According to the JSR-133 Cookbook, for x86 only StoreLoad/AnyAny barriers need memory fence.
+   * According to the JSR-133 Cookbook, for x86-64 only StoreLoad/AnyAny barriers need memory fence.
    * All other barriers (LoadAny, AnyStore, StoreStore) are nops due to the x86-64 memory model.
    * For those cases, all we need to ensure is that there is a scheduling barrier in place.
    */
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index f5931a2..c93bc21 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -399,7 +399,7 @@
   ArenaVector<ValueSet*> sets_;
 
   // BitVector which serves as a fast-access map from block id to
-  // visited/unvisited boolean.
+  // visited/unvisited Boolean.
   ArenaBitVector visited_blocks_;
 
   DISALLOW_COPY_AND_ASSIGN(GlobalValueNumberer);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index ef8d74d..cac385c 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1,4 +1,3 @@
-
 /*
  * Copyright (C) 2016 The Android Open Source Project
  *
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index f1ae549..6cf9b83 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -1878,7 +1878,7 @@
                         // If we use 'value' directly, we would lose 'value'
                         // in the case that the store fails.  Whether the
                         // store succeeds, or fails, it will load the
-                        // correct boolean value into the 'out' register.
+                        // correct Boolean value into the 'out' register.
   // This test isn't really necessary. We only support Primitive::kPrimInt,
   // Primitive::kPrimNot, and we already verified that we're working on one
   // of those two types. It's left here in case the code needs to support
diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index 3022e97..00a1fa1 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -1477,7 +1477,7 @@
                         // If we use 'value' directly, we would lose 'value'
                         // in the case that the store fails.  Whether the
                         // store succeeds, or fails, it will load the
-                        // correct boolean value into the 'out' register.
+                        // correct Boolean value into the 'out' register.
   if (type == Primitive::kPrimLong) {
     __ Scd(out, TMP);
   } else {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index a2980dc..f0ea9e2 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -565,7 +565,7 @@
   ArtMethod* GetArtMethod() const { return art_method_; }
   void SetArtMethod(ArtMethod* method) { art_method_ = method; }
 
-  // Returns an instruction with the opposite boolean value from 'cond'.
+  // Returns an instruction with the opposite Boolean value from 'cond'.
   // The instruction has been inserted into the graph, either as a constant, or
   // before cursor.
   HInstruction* InsertOppositeCondition(HInstruction* cond, HInstruction* cursor);
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
index a2d1190..e2ee940 100644
--- a/dexlayout/dex_ir.h
+++ b/dexlayout/dex_ir.h
@@ -741,7 +741,7 @@
   uint32_t GetAccessFlags() const { return access_flags_; }
   const TypeId* Superclass() const { return superclass_; }
   const TypeIdVector* Interfaces()
-      { return interfaces_ == nullptr ? nullptr: interfaces_->GetTypeList(); }
+      { return interfaces_ == nullptr ? nullptr : interfaces_->GetTypeList(); }
   uint32_t InterfacesOffset() { return interfaces_ == nullptr ? 0 : interfaces_->GetOffset(); }
   const StringId* SourceFile() const { return source_file_; }
   AnnotationsDirectoryItem* Annotations() const { return annotations_; }
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 3cf900e..92f1e18 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -1784,7 +1784,7 @@
         os << StringPrintf("%d (0x%x)\n", field->GetShort(obj), field->GetShort(obj));
         break;
       case Primitive::kPrimBoolean:
-        os << StringPrintf("%s (0x%x)\n", field->GetBoolean(obj)? "true" : "false",
+        os << StringPrintf("%s (0x%x)\n", field->GetBoolean(obj) ? "true" : "false",
             field->GetBoolean(obj));
         break;
       case Primitive::kPrimByte:
diff --git a/runtime/arch/arm/quick_method_frame_info_arm.h b/runtime/arch/arm/quick_method_frame_info_arm.h
index 4b23c77..35f1948 100644
--- a/runtime/arch/arm/quick_method_frame_info_arm.h
+++ b/runtime/arch/arm/quick_method_frame_info_arm.h
@@ -62,7 +62,7 @@
 
 constexpr uint32_t ArmCalleeSaveFpSpills(Runtime::CalleeSaveType type) {
   return kArmCalleeSaveFpAlwaysSpills | kArmCalleeSaveFpRefSpills |
-      (type == Runtime::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills: 0) |
+      (type == Runtime::kSaveRefsAndArgs ? kArmCalleeSaveFpArgSpills : 0) |
       (type == Runtime::kSaveAllCalleeSaves ? kArmCalleeSaveFpAllSpills : 0) |
       (type == Runtime::kSaveEverything ? kArmCalleeSaveFpEverythingSpills : 0);
 }
diff --git a/runtime/arch/arm64/quick_method_frame_info_arm64.h b/runtime/arch/arm64/quick_method_frame_info_arm64.h
index 36f283b..32d9d08 100644
--- a/runtime/arch/arm64/quick_method_frame_info_arm64.h
+++ b/runtime/arch/arm64/quick_method_frame_info_arm64.h
@@ -85,7 +85,7 @@
 
 constexpr uint32_t Arm64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
   return kArm64CalleeSaveFpAlwaysSpills | kArm64CalleeSaveFpRefSpills |
-      (type == Runtime::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills: 0) |
+      (type == Runtime::kSaveRefsAndArgs ? kArm64CalleeSaveFpArgSpills : 0) |
       (type == Runtime::kSaveAllCalleeSaves ? kArm64CalleeSaveFpAllSpills : 0) |
       (type == Runtime::kSaveEverything ? kArm64CalleeSaveFpEverythingSpills : 0);
 }
diff --git a/runtime/arch/mips64/quick_method_frame_info_mips64.h b/runtime/arch/mips64/quick_method_frame_info_mips64.h
index 397776e..d774473 100644
--- a/runtime/arch/mips64/quick_method_frame_info_mips64.h
+++ b/runtime/arch/mips64/quick_method_frame_info_mips64.h
@@ -78,7 +78,7 @@
 
 constexpr uint32_t Mips64CalleeSaveFpSpills(Runtime::CalleeSaveType type) {
   return kMips64CalleeSaveFpRefSpills |
-      (type == Runtime::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills: 0) |
+      (type == Runtime::kSaveRefsAndArgs ? kMips64CalleeSaveFpArgSpills : 0) |
       (type == Runtime::kSaveAllCalleeSaves ? kMips64CalleeSaveFpAllSpills : 0) |
       (type == Runtime::kSaveEverything ? kMips64CalleeSaveFpEverythingSpills : 0);
 }
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 7d4b158..7b6c0dc 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -487,7 +487,7 @@
       // says AccessibleObject is 9 bytes but sizeof(AccessibleObject) is 12 bytes due to padding.
       // The RoundUp is to get around this case.
       static constexpr size_t kPackAlignment = 4;
-      size_t expected_size = RoundUp(is_static ? klass->GetClassSize(): klass->GetObjectSize(),
+      size_t expected_size = RoundUp(is_static ? klass->GetClassSize() : klass->GetObjectSize(),
           kPackAlignment);
       if (sizeof(T) != expected_size) {
         LOG(ERROR) << "Class size mismatch:"
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 34d8284..268cca0 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -360,7 +360,7 @@
     // If we are the zygote, the non moving space becomes the zygote space when we run
     // PreZygoteFork the first time. In this case, call the map "zygote space" since we can't
     // rename the mem map later.
-    const char* space_name = is_zygote ? kZygoteSpaceName: kNonMovingSpaceName;
+    const char* space_name = is_zygote ? kZygoteSpaceName : kNonMovingSpaceName;
     // Reserve the non moving mem map before the other two since it needs to be at a specific
     // address.
     non_moving_space_mem_map.reset(
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index b15544d..38b68cb 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -42,7 +42,7 @@
 
 class Heap;
 
-// Used to process java.lang.References concurrently or paused.
+// Used to process java.lang.ref.Reference instances concurrently or paused.
 class ReferenceProcessor {
  public:
   explicit ReferenceProcessor();
diff --git a/runtime/gc/scoped_gc_critical_section.h b/runtime/gc/scoped_gc_critical_section.h
index ec93bca..1271ff7 100644
--- a/runtime/gc/scoped_gc_critical_section.h
+++ b/runtime/gc/scoped_gc_critical_section.h
@@ -27,8 +27,8 @@
 
 namespace gc {
 
-// Wait until the GC is finished and then prevent GC from starting until the destructor. Used
-// to prevent deadlocks in places where we call ClassLinker::VisitClass with all th threads
+// Wait until the GC is finished and then prevent the GC from starting until the destructor. Used
+// to prevent deadlocks in places where we call ClassLinker::VisitClass with all the threads
 // suspended.
 class ScopedGCCriticalSection {
  public:
diff --git a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
index 6cec363..28e831a 100644
--- a/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
+++ b/runtime/interpreter/mterp/arm64/op_iput_wide_quick.S
@@ -4,7 +4,7 @@
     GET_VREG w2, w2                     // w2<- fp[B], the object pointer
     ubfx    w0, wINST, #8, #4           // w0<- A
     cbz     w2, common_errNullObject    // object was null
-    GET_VREG_WIDE x0, w0                // x0-< fp[A]
+    GET_VREG_WIDE x0, w0                // x0<- fp[A]
     FETCH_ADVANCE_INST 2                // advance rPC, load wINST
     str     x0, [x2, x3]                // obj.field<- x0
     GET_INST_OPCODE ip                  // extract opcode from wINST
diff --git a/runtime/interpreter/mterp/out/mterp_arm64.S b/runtime/interpreter/mterp/out/mterp_arm64.S
index 681790d..7d442c0 100644
--- a/runtime/interpreter/mterp/out/mterp_arm64.S
+++ b/runtime/interpreter/mterp/out/mterp_arm64.S
@@ -6593,7 +6593,7 @@
     GET_VREG w2, w2                     // w2<- fp[B], the object pointer
     ubfx    w0, wINST, #8, #4           // w0<- A
     cbz     w2, common_errNullObject    // object was null
-    GET_VREG_WIDE x0, w0                // x0-< fp[A]
+    GET_VREG_WIDE x0, w0                // x0<- fp[A]
     FETCH_ADVANCE_INST 2                // advance rPC, load wINST
     str     x0, [x2, x3]                // obj.field<- x0
     GET_INST_OPCODE ip                  // extract opcode from wINST
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index 7dd3d3d..feb6e08 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -1443,7 +1443,7 @@
 
   ObjPtr<mirror::Object> java_method_obj = shadow_frame->GetVRegReference(arg_offset);
   ScopedLocalRef<jobject> java_method(env,
-      java_method_obj == nullptr ? nullptr :env->AddLocalReference<jobject>(java_method_obj));
+      java_method_obj == nullptr ? nullptr : env->AddLocalReference<jobject>(java_method_obj));
 
   ObjPtr<mirror::Object> java_receiver_obj = shadow_frame->GetVRegReference(arg_offset + 1);
   ScopedLocalRef<jobject> java_receiver(env,