Merge "Add support for invoke-static in optimizing compiler."
diff --git a/Android.mk b/Android.mk
index b87f0d3..6c388e5 100644
--- a/Android.mk
+++ b/Android.mk
@@ -232,7 +232,7 @@
 define declare-test-art-target-run-test
 .PHONY: test-art-target-run-test-$(1)
 test-art-target-run-test-$(1): test-art-target-sync $(DX) $(HOST_OUT_EXECUTABLES)/jasmin
-	DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) art/test/run-test $(1)
+	DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) art/test/run-test $(DALVIKVM_FLAGS) $(1)
 	@echo test-art-target-run-test-$(1) PASSED
 
 TEST_ART_TARGET_RUN_TEST_TARGETS += test-art-target-run-test-$(1)
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 39535e9..db77fee 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -16,10 +16,12 @@
  */
 
 #include "dex_file.h"
+#include "dex_file-inl.h"
 #include "dex_instruction.h"
 #include "dex_instruction-inl.h"
 #include "builder.h"
 #include "nodes.h"
+#include "primitive.h"
 
 namespace art {
 
@@ -192,6 +194,23 @@
       break;
     }
 
+    case Instruction::INVOKE_STATIC: {
+      uint32_t method_idx = instruction.VRegB_35c();
+      const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
+      uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
+      const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
+      const size_t number_of_arguments = instruction.VRegA_35c();
+      if (number_of_arguments != 0) {
+        return false;
+      }
+      if (Primitive::GetType(descriptor[0]) != Primitive::kPrimVoid) {
+        return false;
+      }
+      current_block_->AddInstruction(new (arena_) HInvokeStatic(
+          arena_, number_of_arguments, dex_offset, method_idx));
+      break;
+    }
+
     case Instruction::NOP:
       break;
 
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index fff83a1..46ca9aa 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -18,6 +18,7 @@
 #define ART_COMPILER_OPTIMIZING_BUILDER_H_
 
 #include "dex_file.h"
+#include "driver/dex_compilation_unit.h"
 #include "utils/allocation.h"
 #include "utils/growable_array.h"
 
@@ -33,7 +34,9 @@
 
 class HGraphBuilder : public ValueObject {
  public:
-  explicit HGraphBuilder(ArenaAllocator* arena)
+  HGraphBuilder(ArenaAllocator* arena,
+                const DexCompilationUnit* dex_compilation_unit = nullptr,
+                const DexFile* dex_file = nullptr)
       : arena_(arena),
         branch_targets_(arena, 0),
         locals_(arena, 0),
@@ -42,7 +45,9 @@
         current_block_(nullptr),
         graph_(nullptr),
         constant0_(nullptr),
-        constant1_(nullptr) { }
+        constant1_(nullptr),
+        dex_file_(dex_file),
+        dex_compilation_unit_(dex_compilation_unit) { }
 
   HGraph* BuildGraph(const DexFile::CodeItem& code);
 
@@ -83,6 +88,9 @@
   HIntConstant* constant0_;
   HIntConstant* constant1_;
 
+  const DexFile* const dex_file_;
+  const DexCompilationUnit* const dex_compilation_unit_;
+
   DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
 };
 
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index bb6ac84..b86665b 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -21,8 +21,11 @@
 #include "dex/verified_method.h"
 #include "driver/dex_compilation_unit.h"
 #include "gc_map_builder.h"
+#include "leb128.h"
+#include "mapping_table.h"
 #include "utils/assembler.h"
 #include "verifier/dex_gc_map.h"
+#include "vmap_table.h"
 
 namespace art {
 
@@ -120,8 +123,95 @@
       dex_compilation_unit.GetVerifiedMethod()->GetDexGcMap();
   verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
 
-  GcMapBuilder builder(data, 0, 0, dex_gc_map.RegWidth());
+  uint32_t max_native_offset = 0;
+  for (size_t i = 0; i < pc_infos_.Size(); i++) {
+    uint32_t native_offset = pc_infos_.Get(i).native_pc;
+    if (native_offset > max_native_offset) {
+      max_native_offset = native_offset;
+    }
+  }
+
+  GcMapBuilder builder(data, pc_infos_.Size(), max_native_offset, dex_gc_map.RegWidth());
+  for (size_t i = 0; i < pc_infos_.Size(); i++) {
+    struct PcInfo pc_info = pc_infos_.Get(i);
+    uint32_t native_offset = pc_info.native_pc;
+    uint32_t dex_pc = pc_info.dex_pc;
+    const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
+    CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
+    builder.AddEntry(native_offset, references);
+  }
 }
 
+void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
+  uint32_t pc2dex_data_size = 0u;
+  uint32_t pc2dex_entries = pc_infos_.Size();
+  uint32_t pc2dex_offset = 0u;
+  int32_t pc2dex_dalvik_offset = 0;
+  uint32_t dex2pc_data_size = 0u;
+  uint32_t dex2pc_entries = 0u;
+
+  // We currently only have pc2dex entries.
+  for (size_t i = 0; i < pc2dex_entries; i++) {
+    struct PcInfo pc_info = pc_infos_.Get(i);
+    pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
+    pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
+    pc2dex_offset = pc_info.native_pc;
+    pc2dex_dalvik_offset = pc_info.dex_pc;
+  }
+
+  uint32_t total_entries = pc2dex_entries + dex2pc_entries;
+  uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
+  uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
+  data->resize(data_size);
+
+  uint8_t* data_ptr = &(*data)[0];
+  uint8_t* write_pos = data_ptr;
+  write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
+  write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
+  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
+  uint8_t* write_pos2 = write_pos + pc2dex_data_size;
+
+  pc2dex_offset = 0u;
+  pc2dex_dalvik_offset = 0u;
+  for (size_t i = 0; i < pc2dex_entries; i++) {
+    struct PcInfo pc_info = pc_infos_.Get(i);
+    DCHECK(pc2dex_offset <= pc_info.native_pc);
+    write_pos = EncodeUnsignedLeb128(write_pos, pc_info.native_pc - pc2dex_offset);
+    write_pos = EncodeSignedLeb128(write_pos, pc_info.dex_pc - pc2dex_dalvik_offset);
+    pc2dex_offset = pc_info.native_pc;
+    pc2dex_dalvik_offset = pc_info.dex_pc;
+  }
+  DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
+  DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);
+
+  if (kIsDebugBuild) {
+    // Verify the encoded table holds the expected data.
+    MappingTable table(data_ptr);
+    CHECK_EQ(table.TotalSize(), total_entries);
+    CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
+    auto it = table.PcToDexBegin();
+    auto it2 = table.DexToPcBegin();
+    for (size_t i = 0; i < pc2dex_entries; i++) {
+      struct PcInfo pc_info = pc_infos_.Get(i);
+      CHECK_EQ(pc_info.native_pc, it.NativePcOffset());
+      CHECK_EQ(pc_info.dex_pc, it.DexPc());
+      ++it;
+    }
+    CHECK(it == table.PcToDexEnd());
+    CHECK(it2 == table.DexToPcEnd());
+  }
+}
+
+void CodeGenerator::BuildVMapTable(std::vector<uint8_t>* data) const {
+  Leb128EncodingVector vmap_encoder;
+  size_t size = 1 + 1 /* marker */ + 0;
+  vmap_encoder.Reserve(size + 1u);  // All values are likely to be one byte in ULEB128 (<128).
+  vmap_encoder.PushBackUnsigned(size);
+  // We're currently always saving the frame pointer, so set it in the table as a temporary.
+  vmap_encoder.PushBackUnsigned(kVRegTempBaseReg + VmapTable::kEntryAdjustment);
+  vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
+
+  *data = vmap_encoder.GetData();
+}
 
 }  // namespace art
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 63f8cbf..24dcab6 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -38,6 +38,11 @@
   DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
 };
 
+struct PcInfo {
+  uint32_t dex_pc;
+  uintptr_t native_pc;
+};
+
 /**
  * A Location is an abstraction over the potential location
  * of an instruction. It could be in register or stack.
@@ -81,7 +86,8 @@
 class LocationSummary : public ArenaObject {
  public:
   explicit LocationSummary(HInstruction* instruction)
-      : inputs(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()) {
+      : inputs(instruction->GetBlock()->GetGraph()->GetArena(), instruction->InputCount()),
+        temps(instruction->GetBlock()->GetGraph()->GetArena(), 0) {
     inputs.SetSize(instruction->InputCount());
     for (int i = 0; i < instruction->InputCount(); i++) {
       inputs.Put(i, Location());
@@ -100,10 +106,19 @@
     output = Location(location);
   }
 
+  void AddTemp(Location location) {
+    temps.Add(location);
+  }
+
+  Location GetTemp(uint32_t at) const {
+    return temps.Get(at);
+  }
+
   Location Out() const { return output; }
 
  private:
   GrowableArray<Location> inputs;
+  GrowableArray<Location> temps;
   Location output;
 
   DISALLOW_COPY_AND_ASSIGN(LocationSummary);
@@ -134,9 +149,17 @@
 
   uint32_t GetFrameSize() const { return frame_size_; }
   void SetFrameSize(uint32_t size) { frame_size_ = size; }
+  uint32_t GetCoreSpillMask() const { return core_spill_mask_; }
 
-  void BuildMappingTable(std::vector<uint8_t>* vector) const { }
-  void BuildVMapTable(std::vector<uint8_t>* vector) const { }
+  void RecordPcInfo(uint32_t dex_pc) {
+    struct PcInfo pc_info;
+    pc_info.dex_pc = dex_pc;
+    pc_info.native_pc = GetAssembler()->CodeSize();
+    pc_infos_.Add(pc_info);
+  }
+
+  void BuildMappingTable(std::vector<uint8_t>* vector) const;
+  void BuildVMapTable(std::vector<uint8_t>* vector) const;
   void BuildNativeGCMap(
       std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
 
@@ -144,23 +167,26 @@
   explicit CodeGenerator(HGraph* graph)
       : frame_size_(0),
         graph_(graph),
-        block_labels_(graph->GetArena(), 0) {
+        block_labels_(graph->GetArena(), 0),
+        pc_infos_(graph->GetArena(), 32) {
     block_labels_.SetSize(graph->GetBlocks()->Size());
   }
   ~CodeGenerator() { }
 
+  // Frame size required for this method.
+  uint32_t frame_size_;
+  uint32_t core_spill_mask_;
+
  private:
   void InitLocations(HInstruction* instruction);
   void CompileBlock(HBasicBlock* block);
   void CompileEntryBlock();
 
-  // Frame size required for this method.
-  uint32_t frame_size_;
-
   HGraph* const graph_;
 
   // Labels for each block that will be compiled.
   GrowableArray<Label> block_labels_;
+  GrowableArray<PcInfo> pc_infos_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
 };
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 04bdc34..c85d67d 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -18,17 +18,27 @@
 #include "utils/assembler.h"
 #include "utils/arm/assembler_arm.h"
 
+#include "mirror/array.h"
+#include "mirror/art_method.h"
+
 #define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
 
 namespace art {
 namespace arm {
 
 void CodeGeneratorARM::GenerateFrameEntry() {
+  core_spill_mask_ |= (1 << LR);
+  // We're currently always using FP, which is callee-saved in Quick.
+  core_spill_mask_ |= (1 << FP);
+
   __ PushList((1 << FP) | (1 << LR));
   __ mov(FP, ShifterOperand(SP));
-  if (GetFrameSize() != 0) {
-    __ AddConstant(SP, -GetFrameSize());
-  }
+
+  // Account for the current ART method, the return pc, and FP in the frame size.
+  SetFrameSize(RoundUp(GetFrameSize() + 3 * kWordSize, kStackAlignment));
+  // The return pc (LR) and FP have already been pushed on the stack.
+  __ AddConstant(SP, -(GetFrameSize() - 2 * kWordSize));
+  __ str(R0, Address(SP, 0));
 }
 
 void CodeGeneratorARM::GenerateFrameExit() {
@@ -173,5 +183,43 @@
   codegen_->GenerateFrameExit();
 }
 
+void LocationsBuilderARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
+  CHECK_EQ(invoke->InputCount(), 0);
+  locations->AddTemp(Location(R0));
+  invoke->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorARM::LoadCurrentMethod(Register reg) {
+  __ ldr(reg, Address(SP, 0));
+}
+
+void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) {
+  Register temp = invoke->GetLocations()->GetTemp(0).reg<Register>();
+  size_t index_in_cache = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+      invoke->GetIndexInDexCache() * kWordSize;
+
+  // TODO: Implement all kinds of calls:
+  // 1) boot -> boot
+  // 2) app -> boot
+  // 3) app -> app
+  //
+  // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+  // temp = method;
+  LoadCurrentMethod(temp);
+  // temp = temp->dex_cache_resolved_methods_;
+  __ ldr(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+  // temp = temp[index_in_cache]
+  __ ldr(temp, Address(temp, index_in_cache));
+  // LR = temp[offset_of_quick_compiled_code]
+  __ ldr(LR, Address(temp,
+                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+  // Call the method entry point through LR.
+  __ blx(LR);
+
+  codegen_->RecordPcInfo(invoke->GetDexPc());
+}
+
 }  // namespace arm
 }  // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 52a7bf4..7a2835d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -58,6 +58,7 @@
 #undef DECLARE_VISIT_INSTRUCTION
 
   Assembler* GetAssembler() const { return assembler_; }
+  void LoadCurrentMethod(Register reg);
 
  private:
   Assembler* const assembler_;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c4bda56..54bff0c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -18,18 +18,28 @@
 #include "utils/assembler.h"
 #include "utils/x86/assembler_x86.h"
 
+#include "mirror/array.h"
+#include "mirror/art_method.h"
+
 #define __ reinterpret_cast<X86Assembler*>(GetAssembler())->
 
 namespace art {
 namespace x86 {
 
 void CodeGeneratorX86::GenerateFrameEntry() {
+  // Create a fake register to mimic Quick.
+  static const int kFakeReturnRegister = 8;
+  core_spill_mask_ |= (1 << kFakeReturnRegister);
+  // We're currently always using EBP, which is callee-saved in Quick.
+  core_spill_mask_ |= (1 << EBP);
+
   __ pushl(EBP);
   __ movl(EBP, ESP);
-
-  if (GetFrameSize() != 0) {
-    __ subl(ESP, Immediate(GetFrameSize()));
-  }
+  // Account for the current ART method, the return pc, and EBP in the frame size.
+  SetFrameSize(RoundUp(GetFrameSize() + 3 * kWordSize, kStackAlignment));
+  // The return address and EBP have already been pushed on the stack.
+  __ subl(ESP, Immediate(GetFrameSize() - 2 * kWordSize));
+  __ movl(Address(ESP, 0), EAX);
 }
 
 void CodeGeneratorX86::GenerateFrameExit() {
@@ -45,6 +55,10 @@
   __ pushl(location.reg<Register>());
 }
 
+void InstructionCodeGeneratorX86::LoadCurrentMethod(Register reg) {
+  __ movl(reg, Address(ESP, 0));
+}
+
 void CodeGeneratorX86::Move(HInstruction* instruction, Location location) {
   HIntConstant* constant = instruction->AsIntConstant();
   if (constant != nullptr) {
@@ -110,7 +124,8 @@
 
 static int32_t GetStackSlot(HLocal* local) {
   // We are currently using EBP to access locals, so the offset must be negative.
-  return (local->GetRegNumber() + 1) * -kWordSize;
+  // +1 for going backwards, +1 for the method pointer.
+  return (local->GetRegNumber() + 2) * -kWordSize;
 }
 
 void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
@@ -172,5 +187,36 @@
   __ ret();
 }
 
+void LocationsBuilderX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+  LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke);
+  CHECK_EQ(invoke->InputCount(), 0);
+  locations->AddTemp(Location(EAX));
+  invoke->SetLocations(locations);
+}
+
+void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) {
+  Register temp = invoke->GetLocations()->GetTemp(0).reg<Register>();
+  size_t index_in_cache = mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
+      invoke->GetIndexInDexCache() * kWordSize;
+
+  // TODO: Implement all kinds of calls:
+  // 1) boot -> boot
+  // 2) app -> boot
+  // 3) app -> app
+  //
+  // Currently we implement the app -> app logic, which looks up in the resolve cache.
+
+  // temp = method;
+  LoadCurrentMethod(temp);
+  // temp = temp->dex_cache_resolved_methods_;
+  __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+  // temp = temp[index_in_cache]
+  __ movl(temp, Address(temp, index_in_cache));
+  // (temp + offset_of_quick_compiled_code)()
+  __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value()));
+
+  codegen_->RecordPcInfo(invoke->GetDexPc());
+}
+
 }  // namespace x86
 }  // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index ad2a061..505237b 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -57,6 +57,8 @@
 
 #undef DECLARE_VISIT_INSTRUCTION
 
+  void LoadCurrentMethod(Register reg);
+
   Assembler* GetAssembler() const { return assembler_; }
 
  private:
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index e74ed82..50d5c59 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -185,6 +185,7 @@
   M(Goto)                                                  \
   M(If)                                                    \
   M(IntConstant)                                           \
+  M(InvokeStatic)                                          \
   M(LoadLocal)                                             \
   M(Local)                                                 \
   M(Return)                                                \
@@ -554,6 +555,42 @@
   DISALLOW_COPY_AND_ASSIGN(HIntConstant);
 };
 
+class HInvoke : public HInstruction {
+ public:
+  HInvoke(ArenaAllocator* arena, uint32_t number_of_arguments, int32_t dex_pc)
+    : inputs_(arena, number_of_arguments),
+      dex_pc_(dex_pc) {
+    inputs_.SetSize(number_of_arguments);
+  }
+
+  virtual intptr_t InputCount() const { return inputs_.Size(); }
+  virtual HInstruction* InputAt(intptr_t i) const { return inputs_.Get(i); }
+
+  int32_t GetDexPc() const { return dex_pc_; }
+
+ protected:
+  GrowableArray<HInstruction*> inputs_;
+  const int32_t dex_pc_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HInvoke);
+};
+
+class HInvokeStatic : public HInvoke {
+ public:
+  HInvokeStatic(ArenaAllocator* arena, uint32_t number_of_arguments, int32_t dex_pc, int32_t index_in_dex_cache)
+      : HInvoke(arena, number_of_arguments, dex_pc), index_in_dex_cache_(index_in_dex_cache) { }
+
+  uint32_t GetIndexInDexCache() const { return index_in_dex_cache_; }
+
+  DECLARE_INSTRUCTION(InvokeStatic)
+
+ private:
+  uint32_t index_in_dex_cache_;
+
+  DISALLOW_COPY_AND_ASSIGN(HInvokeStatic);
+};
+
 class HGraphVisitor : public ValueObject {
  public:
   explicit HGraphVisitor(HGraph* graph) : graph_(graph) { }
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 334b185..d19c40c 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -62,17 +62,31 @@
     nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
     class_def_idx, method_idx, access_flags, driver.GetVerifiedMethod(&dex_file, method_idx));
 
+  // For testing purposes, we put a special marker on method names that should be compiled
+  // with this compiler. This makes sure we're not regressing.
+  bool shouldCompile = dex_compilation_unit.GetSymbol().find("00024opt_00024") != std::string::npos;
+
   ArenaPool pool;
   ArenaAllocator arena(&pool);
-  HGraphBuilder builder(&arena);
+  HGraphBuilder builder(&arena, &dex_compilation_unit, &dex_file);
   HGraph* graph = builder.BuildGraph(*code_item);
   if (graph == nullptr) {
+    if (shouldCompile) {
+      LOG(FATAL) << "Could not build graph in optimizing compiler";
+    }
     return nullptr;
   }
 
   InstructionSet instruction_set = driver.GetInstructionSet();
+  // The optimizing compiler currently does not have a Thumb2 assembler.
+  if (instruction_set == kThumb2) {
+    instruction_set = kArm;
+  }
   CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, instruction_set);
   if (codegen == nullptr) {
+    if (shouldCompile) {
+      LOG(FATAL) << "Could not find code generator for optimizing compiler";
+    }
     return nullptr;
   }
 
@@ -90,7 +104,7 @@
                             instruction_set,
                             allocator.GetMemory(),
                             codegen->GetFrameSize(),
-                            0, /* GPR spill mask, unused */
+                            codegen->GetCoreSpillMask(),
                             0, /* FPR spill mask, unused */
                             mapping_table,
                             vmap_table,
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 72ebdd3..c23fd44 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -52,6 +52,23 @@
   class X86_64Assembler;
 }
 
+class ExternalLabel {
+ public:
+  ExternalLabel(const char* name, uword address)
+      : name_(name), address_(address) {
+    DCHECK(name != nullptr);
+  }
+
+  const char* name() const { return name_; }
+  uword address() const {
+    return address_;
+  }
+
+ private:
+  const char* name_;
+  const uword address_;
+};
+
 class Label {
  public:
   Label() : position_(0) {}
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index d242c17..ebbb43a 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -54,6 +54,16 @@
 }
 
 
+void X86Assembler::call(const ExternalLabel& label) {
+  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+  intptr_t call_start = buffer_.GetPosition();
+  EmitUint8(0xE8);
+  EmitInt32(label.address());
+  static const intptr_t kCallExternalLabelSize = 5;
+  DCHECK_EQ((buffer_.GetPosition() - call_start), kCallExternalLabelSize);
+}
+
+
 void X86Assembler::pushl(Register reg) {
   AssemblerBuffer::EnsureCapacity ensured(&buffer_);
   EmitUint8(0x50 + reg);
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 879f4ec..f906a6f 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -227,6 +227,7 @@
   void call(Register reg);
   void call(const Address& address);
   void call(Label* label);
+  void call(const ExternalLabel& label);
 
   void pushl(Register reg);
   void pushl(const Address& address);
diff --git a/test/401-optimizing-compiler/expected.txt b/test/401-optimizing-compiler/expected.txt
new file mode 100644
index 0000000..7b3a018
--- /dev/null
+++ b/test/401-optimizing-compiler/expected.txt
@@ -0,0 +1,3 @@
+In static method
+Forced GC
+java.lang.Error: Error
diff --git a/test/401-optimizing-compiler/info.txt b/test/401-optimizing-compiler/info.txt
new file mode 100644
index 0000000..2a89eb5
--- /dev/null
+++ b/test/401-optimizing-compiler/info.txt
@@ -0,0 +1,2 @@
+A set of tests for the optimizing compiler. They will grow incrementally
+to cover what the optimizing compiler supports.
diff --git a/test/401-optimizing-compiler/src/Main.java b/test/401-optimizing-compiler/src/Main.java
new file mode 100644
index 0000000..2609e0f
--- /dev/null
+++ b/test/401-optimizing-compiler/src/Main.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note that $opt$ is a marker for the optimizing compiler to ensure
+// it does compile the method.
+
+public class Main {
+  public static void main(String[] args) {
+    Error error = null;
+    try {
+      $opt$TestInvokeStatic();
+    } catch (Error e) {
+      error = e;
+    }
+    System.out.println(error);
+  }
+
+  public static void $opt$TestInvokeStatic() {
+    printStaticMethod();
+    forceGCStaticMethod();
+    throwStaticMethod();
+  }
+
+  public static void printStaticMethod() {
+    System.out.println("In static method");
+  }
+
+  public static void forceGCStaticMethod() {
+    Runtime.getRuntime().gc();
+    Runtime.getRuntime().gc();
+    Runtime.getRuntime().gc();
+    Runtime.getRuntime().gc();
+    Runtime.getRuntime().gc();
+    Runtime.getRuntime().gc();
+    System.out.println("Forced GC");
+  }
+
+  public static void throwStaticMethod() {
+    throw new Error("Error");
+  }
+}
diff --git a/test/etc/push-and-run-test-jar b/test/etc/push-and-run-test-jar
index 2a2aa70..9e30f65 100755
--- a/test/etc/push-and-run-test-jar
+++ b/test/etc/push-and-run-test-jar
@@ -18,6 +18,7 @@
 QUIET="n"
 DEV_MODE="n"
 INVOKE_WITH=""
+FLAGS=""
 
 while true; do
     if [ "x$1" = "x--quiet" ]; then
@@ -31,6 +32,11 @@
         fi
         LIB="$1"
         shift
+    elif [ "x$1" = "x-Xcompiler-option" ]; then
+        shift
+        option="$1"
+        FLAGS="${FLAGS} -Xcompiler-option $option"
+        shift
     elif [ "x$1" = "x--boot" ]; then
         shift
         BOOT_OPT="$1"
@@ -141,7 +147,7 @@
 JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
 
 cmdline="cd $DEX_LOCATION && mkdir dalvik-cache && export ANDROID_DATA=$DEX_LOCATION && export DEX_LOCATION=$DEX_LOCATION && \
-    $INVOKE_WITH $gdb /system/bin/dalvikvm $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main"
+    $INVOKE_WITH $gdb /system/bin/dalvikvm $FLAGS $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main"
 if [ "$DEV_MODE" = "y" ]; then
   echo $cmdline "$@"
 fi