am 0cd7ec2d: Fix cpplint whitespace/blank_line issues

* commit '0cd7ec2dcd8d7ba30bf3ca420b40dac52849876c':
  Fix cpplint whitespace/blank_line issues
diff --git a/Android.mk b/Android.mk
index 27bd894..971eb2f 100644
--- a/Android.mk
+++ b/Android.mk
@@ -334,15 +334,15 @@
 .PHONY: cpplint-art
 cpplint-art:
 	./art/tools/cpplint.py \
-	    --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline \
-	    $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION))
+	    --filter=-,+build/header_guard,+whitespace/braces,+whitespace/comma,+runtime/explicit,+whitespace/newline,+whitespace/parens \
+	    $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/)
 
 # "mm cpplint-art-aspirational" to see warnings we would like to fix
 .PHONY: cpplint-art-aspirational
 cpplint-art-aspirational:
 	./art/tools/cpplint.py \
 	    --filter=-whitespace/comments,-whitespace/line_length,-build/include,-readability/function,-readability/streams,-readability/todo,-runtime/references \
-	    $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION))
+	    $(shell find art -name *.h -o -name *$(ART_CPP_EXTENSION) | grep -v art/compiler/llvm/generated/)
 
 ########################################################################
 # targets to switch back and forth from libdvm to libart
diff --git a/compiler/dex/arena_allocator.h b/compiler/dex/arena_allocator.h
index 0ad859e..cd2141a 100644
--- a/compiler/dex/arena_allocator.h
+++ b/compiler/dex/arena_allocator.h
@@ -28,7 +28,6 @@
 
 class ArenaAllocator {
   public:
-
     // Type of allocation for memory tuning.
     enum ArenaAllocKind {
       kAllocMisc,
@@ -57,7 +56,6 @@
   void DumpMemStats(std::ostream& os) const;
 
   private:
-
     // Variable-length allocation block.
     struct ArenaMemBlock {
       size_t block_size;
@@ -77,7 +75,6 @@
     uint32_t alloc_stats_[kNumAllocKinds];   // Bytes used by various allocation kinds.
     uint32_t lost_bytes_;                    // Lost memory at end of too-small region
     uint32_t num_allocations_;
-
 };  // ArenaAllocator
 
 
diff --git a/compiler/dex/arena_bit_vector.h b/compiler/dex/arena_bit_vector.h
index 7e5c436..de30859 100644
--- a/compiler/dex/arena_bit_vector.h
+++ b/compiler/dex/arena_bit_vector.h
@@ -30,7 +30,6 @@
  */
 class ArenaBitVector {
   public:
-
     class Iterator {
       public:
         explicit Iterator(ArenaBitVector* bit_vector)
diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h
index 7fa8e99..acfec42 100644
--- a/compiler/dex/backend.h
+++ b/compiler/dex/backend.h
@@ -23,7 +23,6 @@
 namespace art {
 
 class Backend {
-
   public:
     virtual ~Backend() {};
     virtual void Materialize() = 0;
@@ -32,7 +31,6 @@
   protected:
     explicit Backend(ArenaAllocator* arena) : arena_(arena) {};
     ArenaAllocator* const arena_;
-
 };  // Class Backend
 
 }  // namespace art
diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h
index 1946869..e427862 100644
--- a/compiler/dex/dataflow_iterator.h
+++ b/compiler/dex/dataflow_iterator.h
@@ -41,7 +41,6 @@
    */
   class DataflowIterator {
     public:
-
       virtual ~DataflowIterator() {}
 
       // Return the next BasicBlock* to visit.
@@ -81,7 +80,6 @@
       GrowableArray<int>* block_id_list_;
       int idx_;
       bool changed_;
-
   }; // DataflowIterator
 
   class ReachableNodesIterator : public DataflowIterator {
@@ -106,7 +104,6 @@
 
   class PostOrderDfsIterator : public DataflowIterator {
     public:
-
       PostOrderDfsIterator(MIRGraph* mir_graph, bool is_iterative)
           : DataflowIterator(mir_graph, is_iterative, 0,
                              mir_graph->GetNumReachableBlocks(), false) {
diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h
index 6d26bc2..3bfbcd4 100644
--- a/compiler/dex/growable_array.h
+++ b/compiler/dex/growable_array.h
@@ -46,7 +46,6 @@
 template<typename T>
 class GrowableArray {
   public:
-
     class Iterator {
       public:
         explicit Iterator(GrowableArray* g_list)
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index b783f3e..35d2923 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -509,7 +509,6 @@
         AdvanceMemoryVersion(NO_VALUE, field_ref);
       }
       break;
-
   }
   return res;
 }
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 09ed7ae..d29600a 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -135,7 +135,6 @@
   ValueMap value_map_;
   MemoryVersionMap memory_version_map_;
   std::set<uint16_t> null_checked_;
-
 };
 
 } // namespace art
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index a9af477..0b3fa46 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -804,7 +804,6 @@
 
       if (bb->successor_block_list.block_list_type == kPackedSwitch ||
           bb->successor_block_list.block_list_type == kSparseSwitch) {
-
         GrowableArray<SuccessorBlockInfo*>::Iterator iter(bb->successor_block_list.blocks);
 
         succ_id = 0;
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index f86e130..f6011e0 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -553,7 +553,6 @@
    static const char* extended_mir_op_names_[kMirOpLast - kMirOpFirst];
 
  private:
-
    int FindCommonParent(int block1, int block2);
    void ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
                           const ArenaBitVector* src2);
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index 4317d1e..cfd3daf 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -74,7 +74,6 @@
   ::llvm::Instruction* inst = ::llvm::dyn_cast< ::llvm::Instruction>(placeholder);
   DCHECK(inst != NULL);
   inst->eraseFromParent();
-
 }
 
 void MirConverter::DefineValue(::llvm::Value* val, int s_reg) {
@@ -1580,8 +1579,7 @@
 
 /* Extended MIR instructions like PHI */
 void MirConverter::ConvertExtendedMIR(BasicBlock* bb, MIR* mir,
-                               ::llvm::BasicBlock* llvm_bb) {
-
+                                      ::llvm::BasicBlock* llvm_bb) {
   switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
     case kMirOpPhi: {
       // The llvm Phi node already emitted - just DefineValue() here.
@@ -1706,7 +1704,6 @@
   HandlePhiNodes(bb, llvm_bb);
 
   for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
-
     SetDexOffset(mir->offset);
 
     int opcode = mir->dalvikInsn.opcode;
@@ -1795,7 +1792,6 @@
 }
 
 ::llvm::FunctionType* MirConverter::GetFunctionType() {
-
   // Get return type
   ::llvm::Type* ret_type = irb_->getJType(RemapShorty(cu_->shorty[0]));
 
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index 2786314..2b681f6 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -41,7 +41,6 @@
                                llvm::LlvmCompilationUnit* const llvm_compilation_unit);
 
 class MirConverter : public Backend {
-
   public:
     // TODO: flesh out and integrate into new world order.
     MirConverter(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena,
diff --git a/compiler/dex/quick/arm/assemble_arm.cc b/compiler/dex/quick/arm/assemble_arm.cc
index f4aa1f3..0649c9f 100644
--- a/compiler/dex/quick/arm/assemble_arm.cc
+++ b/compiler/dex/quick/arm/assemble_arm.cc
@@ -1007,7 +1007,6 @@
   AssemblerStatus res = kSuccess;  // Assume success
 
   for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
-
     if (lir->opcode < 0) {
       /* 1 means padding is needed */
       if ((lir->opcode == kPseudoPseudoAlign4) && (lir->operands[0] == 1)) {
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index e169dc8..8698b1f 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -969,7 +969,6 @@
 
   /* Method is not empty */
   if (first_lir_insn_) {
-
     // mark the targets of switch statement case labels
     ProcessSwitchTables();
 
@@ -979,9 +978,7 @@
     if (cu_->verbose) {
       CodegenDump();
     }
-
   }
-
 }
 
 CompiledMethod* Mir2Lir::GetCompiledMethod() {
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 14e395c..fd8f86b 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -736,7 +736,6 @@
                                 const MethodReference& target_method,
                                 uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                 InvokeType type, bool skip_this) {
-
   // If we can treat it as non-range (Jumbo ops will use range form)
   if (info->num_arg_words <= 5)
     return GenDalvikArgsNoRange(info, call_state, pcrLabel,
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index eb27bf8..2e9c845 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -73,11 +73,14 @@
 void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
   LIR* this_lir;
 
-  if (head_lir == tail_lir) return;
+  if (head_lir == tail_lir) {
+    return;
+  }
 
   for (this_lir = PREV_LIR(tail_lir); this_lir != head_lir; this_lir = PREV_LIR(this_lir)) {
-
-    if (is_pseudo_opcode(this_lir->opcode)) continue;
+    if (is_pseudo_opcode(this_lir->opcode)) {
+      continue;
+    }
 
     int sink_distance = 0;
 
@@ -110,7 +113,9 @@
      * Currently only eliminate redundant ld/st for constant and Dalvik
      * register accesses.
      */
-    if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) continue;
+    if (!(this_mem_mask & (ENCODE_LITERAL | ENCODE_DALVIK_REG))) {
+      continue;
+    }
 
     uint64_t stop_def_reg_mask = this_lir->def_mask & ~ENCODE_MEM;
     uint64_t stop_use_reg_mask;
@@ -127,12 +132,13 @@
     }
 
     for (check_lir = NEXT_LIR(this_lir); check_lir != tail_lir; check_lir = NEXT_LIR(check_lir)) {
-
       /*
        * Skip already dead instructions (whose dataflow information is
        * outdated and misleading).
        */
-      if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) continue;
+      if (check_lir->flags.is_nop || is_pseudo_opcode(check_lir->opcode)) {
+        continue;
+      }
 
       uint64_t check_mem_mask = (check_lir->use_mask | check_lir->def_mask) & ENCODE_MEM;
       uint64_t alias_condition = this_mem_mask & check_mem_mask;
@@ -274,12 +280,15 @@
   LIR* prev_inst_list[MAX_HOIST_DISTANCE];
 
   /* Empty block */
-  if (head_lir == tail_lir) return;
+  if (head_lir == tail_lir) {
+    return;
+  }
 
   /* Start from the second instruction */
   for (this_lir = NEXT_LIR(head_lir); this_lir != tail_lir; this_lir = NEXT_LIR(this_lir)) {
-
-    if (is_pseudo_opcode(this_lir->opcode)) continue;
+    if (is_pseudo_opcode(this_lir->opcode)) {
+      continue;
+    }
 
     uint64_t target_flags = GetTargetInstFlags(this_lir->opcode);
     /* Skip non-interesting instructions */
@@ -312,12 +321,13 @@
 
     /* Try to hoist the load to a good spot */
     for (check_lir = PREV_LIR(this_lir); check_lir != head_lir; check_lir = PREV_LIR(check_lir)) {
-
       /*
        * Skip already dead instructions (whose dataflow information is
        * outdated and misleading).
        */
-      if (check_lir->flags.is_nop) continue;
+      if (check_lir->flags.is_nop) {
+        continue;
+      }
 
       uint64_t check_mem_mask = check_lir->def_mask & ENCODE_MEM;
       uint64_t alias_condition = stop_use_all_mask & check_mem_mask;
@@ -355,7 +365,9 @@
        */
       if (stop_here || !is_pseudo_opcode(check_lir->opcode)) {
         prev_inst_list[next_slot++] = check_lir;
-        if (next_slot == MAX_HOIST_DISTANCE) break;
+        if (next_slot == MAX_HOIST_DISTANCE) {
+          break;
+        }
       }
 
       /* Found a new place to put the load - move it here */
@@ -400,12 +412,16 @@
            * If the first instruction is a load, don't hoist anything
            * above it since it is unlikely to be beneficial.
            */
-          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) continue;
+          if (GetTargetInstFlags(cur_lir->opcode) & IS_LOAD) {
+            continue;
+          }
           /*
            * If the remaining number of slots is less than LD_LATENCY,
            * insert the hoisted load here.
            */
-          if (slot < LD_LATENCY) break;
+          if (slot < LD_LATENCY) {
+            break;
+          }
         }
 
         // Don't look across a barrier label
@@ -461,7 +477,6 @@
   LIR* this_lir;
 
   for (this_lir = first_lir_insn_; this_lir != last_lir_insn_; this_lir = NEXT_LIR(this_lir)) {
-
     /* Branch to the next instruction */
     if (IsUnconditionalBranch(this_lir)) {
       LIR* next_lir = this_lir;
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 376ad7f..802ff62 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -24,7 +24,6 @@
 
 class MipsMir2Lir : public Mir2Lir {
   public:
-
     MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
     // Required for target - codegen utilities.
@@ -175,7 +174,6 @@
 
   private:
     void ConvertShortToLongBranch(LIR* lir);
-
 };
 
 }  // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index abb687c..41e5a2d 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -166,7 +166,6 @@
 #define is_pseudo_opcode(opcode) (static_cast<int>(opcode) < 0)
 
 class Mir2Lir : public Backend {
-
   public:
     struct SwitchTable {
       int offset;
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 4fa9dfb..edb5ae5 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -24,7 +24,6 @@
 
 class X86Mir2Lir : public Mir2Lir {
   public:
-
     X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
 
     // Required for target - codegen helpers.
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index ccd2454..3a0cbcc 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -46,9 +46,13 @@
         GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
         while (true) {
           SuccessorBlockInfo *sbi = iterator.Next();
-          if (sbi == NULL) break;
+          if (sbi == NULL) {
+            break;
+          }
           res = NeedsVisit(sbi->block);
-          if (res != NULL) break;
+          if (res != NULL) {
+            break;
+          }
         }
       }
     }
@@ -112,12 +116,16 @@
  * register idx is defined in BasicBlock bb.
  */
 bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
-  if (bb->data_flow_info == NULL) return false;
+  if (bb->data_flow_info == NULL) {
+    return false;
+  }
 
   ArenaBitVector::Iterator iterator(bb->data_flow_info->def_v);
   while (true) {
     int idx = iterator.Next();
-    if (idx == -1) break;
+    if (idx == -1) {
+      break;
+    }
     /* Block bb defines register idx */
     def_block_matrix_[idx]->SetBit(bb->id);
   }
@@ -222,7 +230,9 @@
     GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
       while (true) {
         SuccessorBlockInfo *successor_block_info = iterator.Next();
-        if (successor_block_info == NULL) break;
+        if (successor_block_info == NULL) {
+          break;
+        }
         BasicBlock* succ_bb = successor_block_info->block;
         CheckForDominanceFrontier(bb, succ_bb);
       }
@@ -233,13 +243,17 @@
   while (true) {
     //TUNING: hot call to BitVectorIteratorNext
     int dominated_idx = bv_iterator.Next();
-    if (dominated_idx == -1) break;
+    if (dominated_idx == -1) {
+      break;
+    }
     BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
     ArenaBitVector::Iterator df_iterator(dominated_bb->dom_frontier);
     while (true) {
       //TUNING: hot call to BitVectorIteratorNext
       int df_up_idx = df_iterator.Next();
-      if (df_up_idx == -1) break;
+      if (df_up_idx == -1) {
+        break;
+      }
       BasicBlock* df_up_block = GetBasicBlock(df_up_idx);
       CheckForDominanceFrontier(bb, df_up_block);
     }
@@ -313,7 +327,9 @@
   /* Scan the rest of the predecessors */
   while (true) {
       BasicBlock* pred_bb = iter.Next();
-      if (!pred_bb) break;
+      if (!pred_bb) {
+        break;
+      }
       if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) {
         continue;
       } else {
@@ -443,7 +459,9 @@
 bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
   ArenaBitVector* temp_dalvik_register_v = temp_dalvik_register_v_;
 
-  if (bb->data_flow_info == NULL) return false;
+  if (bb->data_flow_info == NULL) {
+    return false;
+  }
   temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
   if (bb->taken && bb->taken->data_flow_info)
     ComputeSuccLineIn(temp_dalvik_register_v, bb->taken->data_flow_info->live_in_v,
@@ -455,7 +473,9 @@
     GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_block_list.blocks);
     while (true) {
       SuccessorBlockInfo *successor_block_info = iterator.Next();
-      if (successor_block_info == NULL) break;
+      if (successor_block_info == NULL) {
+        break;
+      }
       BasicBlock* succ_bb = successor_block_info->block;
       if (succ_bb->data_flow_info) {
         ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
@@ -504,25 +524,27 @@
 
       while (true) {
         int idx = iterator.Next();
-        if (idx == -1) break;
-          BasicBlock* def_bb = GetBasicBlock(idx);
-
-          /* Merge the dominance frontier to tmp_blocks */
-          //TUNING: hot call to Union().
-          if (def_bb->dom_frontier != NULL) {
-            tmp_blocks->Union(def_bb->dom_frontier);
-          }
+        if (idx == -1) {
+          break;
         }
-        if (!phi_blocks->Equal(tmp_blocks)) {
-          change = true;
-          phi_blocks->Copy(tmp_blocks);
+        BasicBlock* def_bb = GetBasicBlock(idx);
 
-          /*
-           * Iterate through the original blocks plus the new ones in
-           * the dominance frontier.
-           */
-          input_blocks->Copy(phi_blocks);
-          input_blocks->Union(def_block_matrix_[dalvik_reg]);
+        /* Merge the dominance frontier to tmp_blocks */
+        //TUNING: hot call to Union().
+        if (def_bb->dom_frontier != NULL) {
+          tmp_blocks->Union(def_bb->dom_frontier);
+        }
+      }
+      if (!phi_blocks->Equal(tmp_blocks)) {
+        change = true;
+        phi_blocks->Copy(tmp_blocks);
+
+        /*
+         * Iterate through the original blocks plus the new ones in
+         * the dominance frontier.
+         */
+        input_blocks->Copy(phi_blocks);
+        input_blocks->Union(def_block_matrix_[dalvik_reg]);
       }
     } while (change);
 
@@ -533,10 +555,14 @@
     ArenaBitVector::Iterator iterator(phi_blocks);
     while (true) {
       int idx = iterator.Next();
-      if (idx == -1) break;
+      if (idx == -1) {
+        break;
+      }
       BasicBlock* phi_bb = GetBasicBlock(idx);
       /* Variable will be clobbered before being used - no need for phi */
-      if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) continue;
+      if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) {
+        continue;
+      }
       MIR *phi =
           static_cast<MIR*>(arena_->NewMem(sizeof(MIR), true, ArenaAllocator::kAllocDFInfo));
       phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
@@ -572,7 +598,9 @@
     GrowableArray<BasicBlock*>::Iterator iter(bb->predecessors);
     while (true) {
       BasicBlock* pred_bb = iter.Next();
-      if (!pred_bb) break;
+      if (!pred_bb) {
+        break;
+      }
       int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map[v_reg];
       uses.push_back(ssa_reg);
       incoming_arc.push_back(pred_bb->id);
@@ -605,8 +633,9 @@
 }
 
 void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
-
-  if (block->visited || block->hidden) return;
+  if (block->visited || block->hidden) {
+    return;
+  }
   block->visited = true;
 
   /* Process this block */
@@ -632,7 +661,9 @@
     GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_block_list.blocks);
     while (true) {
       SuccessorBlockInfo *successor_block_info = iterator.Next();
-      if (successor_block_info == NULL) break;
+      if (successor_block_info == NULL) {
+        break;
+      }
       BasicBlock* succ_bb = successor_block_info->block;
       DoDFSPreOrderSSARename(succ_bb);
       /* Restore SSA map snapshot */
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 67adae6..3d6b571 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1402,7 +1402,6 @@
   }
 
  private:
-
   class ForAllClosure : public Task {
    public:
     ForAllClosure(ParallelCompilationManager* manager, size_t begin, size_t end, Callback* callback,
@@ -1423,6 +1422,7 @@
     virtual void Finalize() {
       delete this;
     }
+
    private:
     const ParallelCompilationManager* const manager_;
     const size_t begin_;
diff --git a/compiler/elf_writer_mclinker.cc b/compiler/elf_writer_mclinker.cc
index 472a606..05f3b02 100644
--- a/compiler/elf_writer_mclinker.cc
+++ b/compiler/elf_writer_mclinker.cc
@@ -307,7 +307,6 @@
   // TODO: ownership of libm_lib_input?
   mcld::Input* libm_lib_input_input = ir_builder_->ReadInput(libm_lib, libm_lib);
   CHECK(libm_lib_input_input != NULL);
-
 }
 #endif
 
diff --git a/compiler/elf_writer_mclinker.h b/compiler/elf_writer_mclinker.h
index 468fa9a..3b33bc4 100644
--- a/compiler/elf_writer_mclinker.h
+++ b/compiler/elf_writer_mclinker.h
@@ -38,7 +38,6 @@
 
 class ElfWriterMclinker : public ElfWriter {
  public:
-
   // Write an ELF file. Returns true on success, false on failure.
   static bool Create(File* file,
                      std::vector<uint8_t>& oat_contents,
diff --git a/compiler/elf_writer_test.cc b/compiler/elf_writer_test.cc
index 4a02b61..e48806e 100644
--- a/compiler/elf_writer_test.cc
+++ b/compiler/elf_writer_test.cc
@@ -22,7 +22,6 @@
 namespace art {
 
 class ElfWriterTest : public CommonTest {
-
  protected:
   virtual void SetUp() {
     ReserveImageSpace();
diff --git a/compiler/jni/portable/jni_compiler.cc b/compiler/jni/portable/jni_compiler.cc
index 44d0c2d..57b8a31 100644
--- a/compiler/jni/portable/jni_compiler.cc
+++ b/compiler/jni/portable/jni_compiler.cc
@@ -46,11 +46,10 @@
 JniCompiler::JniCompiler(LlvmCompilationUnit* cunit,
                          const CompilerDriver& driver,
                          const DexCompilationUnit* dex_compilation_unit)
-: cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()),
-  context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()),
-  dex_compilation_unit_(dex_compilation_unit),
-  func_(NULL), elf_func_idx_(0) {
-
+    : cunit_(cunit), driver_(&driver), module_(cunit_->GetModule()),
+      context_(cunit_->GetLLVMContext()), irb_(*cunit_->GetIRBuilder()),
+      dex_compilation_unit_(dex_compilation_unit),
+      func_(NULL), elf_func_idx_(0) {
   // Check: Ensure that JNI compiler will only get "native" method
   CHECK(dex_compilation_unit->IsNative());
 }
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index b671bd1..45dd429 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -159,7 +159,6 @@
   // count JNIEnv* and return pc (pushed after Method*)
   size_t total_args = static_args + param_args + 2;
   return total_args;
-
 }
 
 }  // namespace x86
diff --git a/compiler/llvm/gbc_expander.cc b/compiler/llvm/gbc_expander.cc
index b139e32..94cc973 100644
--- a/compiler/llvm/gbc_expander.cc
+++ b/compiler/llvm/gbc_expander.cc
@@ -361,7 +361,6 @@
 
   llvm::Value* ExpandIntrinsic(IntrinsicHelper::IntrinsicId intr_id,
                                llvm::CallInst& call_inst);
-
 };
 
 char GBCExpanderPass::ID = 0;
@@ -710,7 +709,6 @@
                                    art::mirror::Array::LengthOffset().Int32Value(),
                                    irb_.getJIntTy(),
                                    kTBAAConstJObject);
-
 }
 
 llvm::Value*
@@ -751,7 +749,6 @@
 llvm::Value* GBCExpanderPass::EmitArrayGEP(llvm::Value* array_addr,
                                            llvm::Value* index_value,
                                            JType elem_jty) {
-
   int data_offset;
   if (elem_jty == kLong || elem_jty == kDouble ||
       (elem_jty == kObject && sizeof(uint64_t) == sizeof(art::mirror::Object*))) {
@@ -1426,7 +1423,6 @@
 
 llvm::Value* GBCExpanderPass::EmitCompareResultSelection(llvm::Value* cmp_eq,
                                                          llvm::Value* cmp_lt) {
-
   llvm::Constant* zero = irb_.getJInt(0);
   llvm::Constant* pos1 = irb_.getJInt(1);
   llvm::Constant* neg1 = irb_.getJInt(-1);
@@ -2437,7 +2433,6 @@
                                          llvm::Value* this_addr,
                                          uint32_t dex_pc,
                                          bool is_fast_path) {
-
   llvm::Function* runtime_func = NULL;
 
   switch (invoke_type) {
diff --git a/compiler/llvm/ir_builder.h b/compiler/llvm/ir_builder.h
index 65da005..c81ba27 100644
--- a/compiler/llvm/ir_builder.h
+++ b/compiler/llvm/ir_builder.h
@@ -219,7 +219,6 @@
   ::llvm::Value* CreatePtrDisp(::llvm::Value* base,
                              ::llvm::Value* offset,
                              ::llvm::PointerType* ret_ty) {
-
     ::llvm::Value* base_int = CreatePtrToInt(base, getPtrEquivIntTy());
     ::llvm::Value* result_int = CreateAdd(base_int, offset);
     ::llvm::Value* result = CreateIntToPtr(result_int, ret_ty);
@@ -232,7 +231,6 @@
                              ::llvm::Value* count,
                              ::llvm::Value* offset,
                              ::llvm::PointerType* ret_ty) {
-
     ::llvm::Value* block_offset = CreateMul(bs, count);
     ::llvm::Value* total_offset = CreateAdd(block_offset, offset);
 
diff --git a/compiler/llvm/llvm_compilation_unit.cc b/compiler/llvm/llvm_compilation_unit.cc
index dfb5724..1f2b977 100644
--- a/compiler/llvm/llvm_compilation_unit.cc
+++ b/compiler/llvm/llvm_compilation_unit.cc
@@ -166,7 +166,6 @@
 }
 
 bool LlvmCompilationUnit::Materialize() {
-
   const bool kDumpBitcode = false;
   if (kDumpBitcode) {
     // Dump the bitcode for debugging
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 0bfa4ec..4c32506 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -83,7 +83,6 @@
     size_oat_dex_file_methods_offsets_(0),
     size_oat_class_status_(0),
     size_oat_class_method_offsets_(0) {
-
   size_t offset = InitOatHeader();
   offset = InitOatDexFiles(offset);
   offset = InitDexFiles(offset);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 541c916..9e23d3e 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -440,7 +440,6 @@
 // during development when fatal aborts lead to a cascade of failures
 // that result in a deadlock.
 class WatchDog {
-
 // WatchDog defines its own CHECK_PTHREAD_CALL to avoid using Log which uses locks
 #undef CHECK_PTHREAD_CALL
 #define CHECK_WATCH_DOG_PTHREAD_CALL(call, args, what) \
diff --git a/runtime/atomic_integer.h b/runtime/atomic_integer.h
index 3492487..132f968 100644
--- a/runtime/atomic_integer.h
+++ b/runtime/atomic_integer.h
@@ -78,6 +78,6 @@
   volatile int32_t value_;
 };
 
-}
+}  // namespace art
 
 #endif  // ART_RUNTIME_ATOMIC_INTEGER_H_
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 250d468..a644998 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -60,4 +60,4 @@
   CHECK(!count_) << "Attempted to destroy barrier with non zero count";
 }
 
-}
+}  // namespace art
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index d26ae9e..298ae56 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -32,9 +32,7 @@
       : barrier_(barrier),
         count1_(count1),
         count2_(count2),
-        count3_(count3) {
-
-  }
+        count3_(count3) {}
 
   void Run(Thread* self) {
     LOG(INFO) << "Before barrier 1 " << *self;
@@ -50,6 +48,7 @@
   virtual void Finalize() {
     delete this;
   }
+
  private:
   Barrier* const barrier_;
   AtomicInteger* const count1_;
@@ -100,9 +99,7 @@
   CheckPassTask(Barrier* barrier, AtomicInteger* count, size_t subtasks)
       : barrier_(barrier),
         count_(count),
-        subtasks_(subtasks) {
-
-  }
+        subtasks_(subtasks) {}
 
   void Run(Thread* self) {
     for (size_t i = 0; i < subtasks_; ++i) {
diff --git a/runtime/base/histogram-inl.h b/runtime/base/histogram-inl.h
index bbca603..d572cf9 100644
--- a/runtime/base/histogram-inl.h
+++ b/runtime/base/histogram-inl.h
@@ -212,7 +212,6 @@
   DCHECK_GT(cumulative_perc_.size(), 0ull);
   size_t idx, upper_idx = 0, lower_idx = 0;
   for (idx = 0; idx < cumulative_perc_.size(); idx++) {
-
     if (per <= cumulative_perc_[idx]) {
       upper_idx = idx;
       break;
diff --git a/runtime/base/histogram.h b/runtime/base/histogram.h
index dfb556b..33a1e65 100644
--- a/runtime/base/histogram.h
+++ b/runtime/base/histogram.h
@@ -30,7 +30,6 @@
 // Designed to be simple and used with timing logger in art.
 
 template <class Value> class Histogram {
-
   const double kAdjust;
   const Value kBucketWidth;
   const size_t kInitialBucketCount;
diff --git a/runtime/base/timing_logger.h b/runtime/base/timing_logger.h
index 816cbea..0f00a04 100644
--- a/runtime/base/timing_logger.h
+++ b/runtime/base/timing_logger.h
@@ -50,9 +50,7 @@
 }  // namespace base
 
 class CumulativeLogger {
-
  public:
-
   explicit CumulativeLogger(const std::string& name);
   void prepare_stats();
   ~CumulativeLogger();
@@ -68,7 +66,6 @@
   void AddNewLogger(const base::NewTimingLogger& logger) LOCKS_EXCLUDED(lock_);
 
  private:
-
   void AddPair(const std::string &label, uint64_t delta_time)
       EXCLUSIVE_LOCKS_REQUIRED(lock_);
   void DumpHistogram(std::ostream &os) EXCLUSIVE_LOCKS_REQUIRED(lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 5a31c87..b502c9a 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -155,7 +155,6 @@
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Dbg::PostException(thread, throw_location, catch_method, catch_dex_pc, exception_object);
   }
-
 } gDebugInstrumentationListener;
 
 // JDWP is allowed unless the Zygote forbids it.
@@ -761,7 +760,6 @@
 JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                        std::vector<uint64_t>& counts)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-
   std::vector<mirror::Class*> classes;
   counts.clear();
   for (size_t i = 0; i < class_ids.size(); ++i) {
diff --git a/runtime/dex_method_iterator.h b/runtime/dex_method_iterator.h
index e915d77..1975e48 100644
--- a/runtime/dex_method_iterator.h
+++ b/runtime/dex_method_iterator.h
@@ -120,7 +120,6 @@
   }
 
  private:
-
   ClassDataItemIterator& GetIterator() const {
     CHECK(it_.get() != NULL);
     return *it_.get();
diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h
index 7622604..5edea95 100644
--- a/runtime/gc/accounting/heap_bitmap-inl.h
+++ b/runtime/gc/accounting/heap_bitmap-inl.h
@@ -40,7 +40,6 @@
     SpaceSetMap* set = *it;
     set->Visit(visitor);
   }
-
 }
 
 }  // namespace accounting
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index f4b725c..1710579 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -106,7 +106,6 @@
   explicit HeapBitmap(Heap* heap) : heap_(heap) {}
 
  private:
-
   const Heap* const heap_;
 
   void AddContinuousSpaceBitmap(SpaceBitmap* bitmap);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 19f1128..6edc067 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -64,9 +64,7 @@
 }
 
 // Clean up any resources associated with the bitmap.
-SpaceBitmap::~SpaceBitmap() {
-
-}
+SpaceBitmap::~SpaceBitmap() {}
 
 void SpaceBitmap::SetHeapLimit(uintptr_t new_end) {
   DCHECK(IsAligned<kBitsPerWord * kAlignment>(new_end));
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 5a1bfe3..bf4c1ed 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -174,6 +174,7 @@
     const size_t index = OffsetToIndex(offset);
     return &bitmap_begin_[index];
   }
+
  private:
   // TODO: heap_end_ is initialized so that the heap bitmap is empty, this doesn't require the -1,
   // however, we document that this is expected on heap_end_
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index a22faac..1684664 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -79,7 +79,6 @@
   void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
  protected:
-
   // The initial phase. Done without mutators paused.
   virtual void InitializePhase() = 0;
 
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index fde2b41..bdda9fa 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -78,7 +78,6 @@
   void Dump(std::ostream& os) const;
 
  private:
-
   // Tries to initialize an ImageSpace from the given image path,
   // returning NULL on error.
   //
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index f7d776f..6aedd9c 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -49,9 +49,7 @@
 
 LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
     : LargeObjectSpace(name),
-      lock_("large object map space lock", kAllocSpaceLock) {
-
-}
+      lock_("large object map space lock", kAllocSpaceLock) {}
 
 LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
   return new LargeObjectMapSpace(name);
@@ -147,9 +145,7 @@
   AddFreeChunk(begin_, end_ - begin_, NULL);
 }
 
-FreeListSpace::~FreeListSpace() {
-
-}
+FreeListSpace::~FreeListSpace() {}
 
 void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
   Chunk* chunk = ChunkFromAddr(address);
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index db845db..20a4867 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -60,7 +60,6 @@
   size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs);
 
  protected:
-
   explicit LargeObjectSpace(const std::string& name);
 
   // Approximate number of bytes which have been allocated into the space.
@@ -165,6 +164,7 @@
       DCHECK(m_previous == NULL ||
             (m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this));
     }
+
    private:
     size_t m_size;
     Chunk* m_previous;
diff --git a/runtime/image_test.cc b/runtime/image_test.cc
index 9ab1d74..ee50118 100644
--- a/runtime/image_test.cc
+++ b/runtime/image_test.cc
@@ -31,7 +31,6 @@
 namespace art {
 
 class ImageTest : public CommonTest {
-
  protected:
   virtual void SetUp() {
     ReserveImageSpace();
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 2fb272c..45314c2 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -366,7 +366,6 @@
       {
         ScopedThreadStateChange tsc(self, kNative);
         jresult = fn(soa.Env(), rcvr.get(), arg0.get());
-
       }
       result->SetL(soa.Decode<Object*>(jresult));
       ScopedThreadStateChange tsc(self, kNative);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 4feeafb..9af2f83 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -361,7 +361,6 @@
 
 static JdwpError VM_CapabilitiesNew(JdwpState*, Request& request, ExpandBuf* reply)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-
   // The first few capabilities are the same as those reported by the older call.
   VM_Capabilities(NULL, request, reply);
 
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index d909058..bbebece 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -497,13 +497,9 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(AbstractMethod);
 };
 
-class MANAGED Method : public AbstractMethod {
+class MANAGED Method : public AbstractMethod {};
 
-};
-
-class MANAGED Constructor : public AbstractMethod {
-
-};
+class MANAGED Constructor : public AbstractMethod {};
 
 class MANAGED AbstractMethodClass : public Class {
  private:
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 2d2130c..e490d97 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -320,13 +320,11 @@
   Class* java_lang_Class = GetClass();
   Class* java_lang_reflect_Field = java_lang_Class->GetInstanceField(0)->GetClass();
   return this == java_lang_reflect_Field;
-
 }
 
 bool Class::IsMethodClass() const {
   return (this == AbstractMethod::GetMethodClass()) ||
-      (this == AbstractMethod::GetConstructorClass());
-
+          (this == AbstractMethod::GetConstructorClass());
 }
 
 void Class::SetClassLoader(ClassLoader* new_class_loader) {
diff --git a/runtime/oat/runtime/argument_visitor.h b/runtime/oat/runtime/argument_visitor.h
index d92ff19..aaf93f7 100644
--- a/runtime/oat/runtime/argument_visitor.h
+++ b/runtime/oat/runtime/argument_visitor.h
@@ -199,7 +199,6 @@
     uint64_t low_half = *reinterpret_cast<uint32_t*>(GetParamAddress());
     uint64_t high_half = *reinterpret_cast<uint32_t*>(stack_args_);
     return (low_half & 0xffffffffULL) | (high_half << 32);
-
   }
 
   void VisitArguments() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -244,6 +243,6 @@
   bool is_split_long_or_double_;
 };
 
-}
+}  // namespace art
 
 #endif  // ART_RUNTIME_OAT_RUNTIME_ARGUMENT_VISITOR_H_
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index bb8341e..6562633 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -122,7 +122,6 @@
 }
 
 bool OatFile::Dlopen(const std::string& elf_filename, byte* requested_base) {
-
   char* absolute_path = realpath(elf_filename.c_str(), NULL);
   if (absolute_path == NULL) {
     return false;
diff --git a/runtime/runtime_support_llvm.cc b/runtime/runtime_support_llvm.cc
index cbdefe8..d703db2 100644
--- a/runtime/runtime_support_llvm.cc
+++ b/runtime/runtime_support_llvm.cc
@@ -50,7 +50,6 @@
 using namespace art;
 
 extern "C" {
-
 class ShadowFrameCopyVisitor : public StackVisitor {
  public:
   explicit ShadowFrameCopyVisitor(Thread* self) : StackVisitor(self, NULL), prev_frame_(NULL),
@@ -844,5 +843,4 @@
 void art_portable_constructor_barrier() {
   LOG(FATAL) << "Implemented by IRBuilder.";
 }
-
 }  // extern "C"
diff --git a/runtime/runtime_support_llvm.h b/runtime/runtime_support_llvm.h
index 566f7bc..43ea953 100644
--- a/runtime/runtime_support_llvm.h
+++ b/runtime/runtime_support_llvm.h
@@ -18,13 +18,10 @@
 #define ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
 
 extern "C" {
-
 //----------------------------------------------------------------------------
 // Runtime Support Function Lookup Callback
 //----------------------------------------------------------------------------
-
 void* art_portable_find_runtime_support_func(void* context, const char* name);
-
 }  // extern "C"
 
 #endif  // ART_RUNTIME_RUNTIME_SUPPORT_LLVM_H_
diff --git a/runtime/stack.h b/runtime/stack.h
index 0e2c4c5..99ba898 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -554,7 +554,6 @@
   static void DescribeStack(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
-
   instrumentation::InstrumentationStackFrame GetInstrumentationStackFrame(uint32_t depth) const;
 
   void SanityCheckFrame() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -567,6 +566,7 @@
   size_t num_frames_;
   // Depth of the frame we're currently at.
   size_t cur_depth_;
+
  protected:
   Context* const context_;
 };
@@ -638,6 +638,7 @@
     spill_shifts--;  // wind back one as we want the last match
     return spill_shifts;
   }
+
  private:
   const uint16_t* table_;
 };
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 76984c1..f0d5417 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -2067,9 +2067,7 @@
 
 class RootCallbackVisitor {
  public:
-  RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {
-
-  }
+  RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
 
   void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
     visitor_(obj, arg_);
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index 784a7ca..067ef2d 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -180,10 +180,7 @@
 
 WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name,
                                        size_t stack_size)
-    : ThreadPoolWorker(thread_pool, name, stack_size),
-      task_(NULL) {
-
-}
+    : ThreadPoolWorker(thread_pool, name, stack_size), task_(NULL) {}
 
 void WorkStealingWorker::Run() {
   Thread* self = Thread::Current();
@@ -254,9 +251,7 @@
   }
 }
 
-WorkStealingWorker::~WorkStealingWorker() {
-
-}
+WorkStealingWorker::~WorkStealingWorker() {}
 
 WorkStealingThreadPool::WorkStealingThreadPool(size_t num_threads)
     : ThreadPool(0),
@@ -288,8 +283,6 @@
   return NULL;
 }
 
-WorkStealingThreadPool::~WorkStealingThreadPool() {
-
-}
+WorkStealingThreadPool::~WorkStealingThreadPool() {}
 
 }  // namespace art
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index b9f185d..7b626fb 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -124,9 +124,7 @@
 
 class WorkStealingTask : public Task {
  public:
-  WorkStealingTask() : ref_count_(0) {
-
-  }
+  WorkStealingTask() : ref_count_(0) {}
 
   size_t GetRefCount() const {
     return ref_count_;
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index 10954e8..9b789d2 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -105,9 +105,7 @@
   TreeTask(ThreadPool* const thread_pool, AtomicInteger* count, int depth)
       : thread_pool_(thread_pool),
         count_(count),
-        depth_(depth) {
-
-  }
+        depth_(depth) {}
 
   void Run(Thread* self) {
     if (depth_ > 1) {
diff --git a/runtime/trace.h b/runtime/trace.h
index 5bd6a8d..bd9c140 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -78,6 +78,7 @@
                                mirror::AbstractMethod* catch_method, uint32_t catch_dex_pc,
                                mirror::Throwable* exception_object)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   explicit Trace(File* trace_file, int buffer_size, int flags);
 
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 5a70f2a..ff7f594 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -3749,7 +3749,6 @@
 }
 
 MethodVerifier::PcToConcreteMethodMap* MethodVerifier::GenerateDevirtMap() {
-
   // It is risky to rely on reg_types for sharpening in cases of soft
   // verification, we might end up sharpening to a wrong implementation. Just abort.
   if (!failure_messages_.empty()) {
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index c66e7cb..5b806c4 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -309,6 +309,7 @@
 
   // Destroy the singleton instance.
   static void Destroy();
+
  private:
   ConflictType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -338,6 +339,7 @@
 
   // Destroy the singleton instance.
   static void Destroy();
+
  private:
   UndefinedType(mirror::Class* klass, const std::string& descriptor, uint16_t cache_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -875,6 +877,7 @@
   }
 
   std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -909,6 +912,7 @@
   }
 
   std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
  private:
   void CheckInvariants() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
diff --git a/runtime/verifier/reg_type_test.cc b/runtime/verifier/reg_type_test.cc
index f37edff..d2c9dd6 100644
--- a/runtime/verifier/reg_type_test.cc
+++ b/runtime/verifier/reg_type_test.cc
@@ -414,7 +414,6 @@
   EXPECT_EQ(expected, unresolved_merged.Dump());
 }
 
-
 TEST_F(RegTypeReferenceTest, JavalangString) {
   // Add a class to the cache then look for the same class and make sure it is  a
   // Hit the second time. Then check for the same effect when using
@@ -433,8 +432,8 @@
   const RegType& ref_type_unintialized = cache.Uninitialized(ref_type, 0110ull);
   EXPECT_TRUE(ref_type_unintialized.IsUninitializedReference());
   EXPECT_FALSE(ref_type_unintialized.IsUnresolvedAndUninitializedReference());
-
 }
+
 TEST_F(RegTypeReferenceTest, JavalangObject) {
   // Add a class to the cache then look for the same class and make sure it is  a
   // Hit the second time. Then I am checking for the same effect when using
@@ -474,7 +473,6 @@
 
 
 TEST_F(RegTypeTest, ConstPrecision) {
-
   // Tests creating primitive types types.
   ScopedObjectAccess soa(Thread::Current());
   RegTypeCache cache_new(true);
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 3a2145b9..d2abaac 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -254,7 +254,6 @@
     SetRegisterTypeWide(vdst, type_l, type_h);  // also sets the high
     result_[0] = verifier_->GetRegTypeCache()->Undefined().GetId();
     result_[1] = verifier_->GetRegTypeCache()->Undefined().GetId();
-
   }
 }
 
diff --git a/test/ReferenceMap/stack_walk_refmap_jni.cc b/test/ReferenceMap/stack_walk_refmap_jni.cc
index 9ef4a59..492916e 100644
--- a/test/ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/ReferenceMap/stack_walk_refmap_jni.cc
@@ -280,4 +280,4 @@
   return count + 1;
 }
 
-}
+}  // namespace art
diff --git a/test/StackWalk/stack_walk_jni.cc b/test/StackWalk/stack_walk_jni.cc
index 4b472da..fc156b1 100644
--- a/test/StackWalk/stack_walk_jni.cc
+++ b/test/StackWalk/stack_walk_jni.cc
@@ -127,4 +127,4 @@
   return count + 1;
 }
 
-}
+}  // namespace art