[optimizing] Add support for x86 constant area

Use the Quick trick of finding the address of the method by calling the
next instruction and popping the return address into a register.  This
trick is needed because the X86 has no PC-relative addressing in 32-bit
mode.
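
For reference, the emitted sequence is essentially (a sketch; the
actual register is whatever the register allocator assigns):

    call next_insn   // pushes the address of next_insn as the return address
  next_insn:
    popl reg         // reg now holds the address of next_insn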

Add a HX86ComputeBaseMethodAddress instruction to trigger generation
of the method address, which is referenced by instructions needing
access to the constant area.

Add a HX86LoadFromConstantTable instruction that takes a
HX86ComputeBaseMethodAddress and a HConstant, and is used to load the
constant's value from the constant area when needed.
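
For example, an HAdd of a float value and a float constant is rewritten
to something like (an illustrative IR sketch):

  i1: X86ComputeBaseMethodAddress
  c1: FloatConstant 2.5
  i2: X86LoadFromConstantTable [i1, c1]
  i3: Add [param, i2]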

Change Add/Sub/Mul/Div to detect a HX86LoadFromConstantTable right-hand
side, and generate code that directly references the constant area.
Other uses will be added later.
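
With that in place, the float add above can be emitted as a single
instruction with a memory operand (the offset shown symbolically; it is
patched in later by an assembler fixup):

  addss xmm0, [reg + <constant_area_offset>]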

Change the inputs to HReturn and HInvoke(s), replacing FP constants
with HX86LoadFromConstantTable instructions.  This allows the values to
be loaded from the constant area into the required location.

Port the X86_64 assembler constant area handling to the X86.

Use the new per-backend optimization framework to do this conversion.

Change-Id: I6d235a72238262e4f9ec0f3c88319a187f865932
Signed-off-by: Mark Mendell <mark.p.mendell@intel.com>
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index f48395b..ecf5eed 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -19,6 +19,7 @@
 #include "art_method.h"
 #include "code_generator_utils.h"
 #include "compiled_method.h"
+#include "constant_area_fixups_x86.h"
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "entrypoints/quick/quick_entrypoints_enum.h"
 #include "gc/accounting/card_table.h"
@@ -2213,7 +2214,7 @@
     case Primitive::kPrimFloat:
     case Primitive::kPrimDouble: {
       locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::Any());
       locations->SetOut(Location::SameAsFirstInput());
       break;
     }
@@ -2275,6 +2276,16 @@
     case Primitive::kPrimFloat: {
       if (second.IsFpuRegister()) {
         __ addss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      } else if (add->InputAt(1)->IsX86LoadFromConstantTable()) {
+        HX86LoadFromConstantTable* const_area = add->InputAt(1)->AsX86LoadFromConstantTable();
+        DCHECK(!const_area->NeedsMaterialization());
+        __ addss(first.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralFloatAddress(
+                   const_area->GetConstant()->AsFloatConstant()->GetValue(),
+                   const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+      } else {
+        DCHECK(second.IsStackSlot());
+        __ addss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
       }
       break;
     }
@@ -2282,6 +2293,16 @@
     case Primitive::kPrimDouble: {
       if (second.IsFpuRegister()) {
         __ addsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      } else if (add->InputAt(1)->IsX86LoadFromConstantTable()) {
+        HX86LoadFromConstantTable* const_area = add->InputAt(1)->AsX86LoadFromConstantTable();
+        DCHECK(!const_area->NeedsMaterialization());
+        __ addsd(first.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralDoubleAddress(
+                   const_area->GetConstant()->AsDoubleConstant()->GetValue(),
+                   const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+      } else {
+        DCHECK(second.IsDoubleStackSlot());
+        __ addsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
       }
       break;
     }
@@ -2305,7 +2326,7 @@
     case Primitive::kPrimFloat:
     case Primitive::kPrimDouble: {
       locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::Any());
       locations->SetOut(Location::SameAsFirstInput());
       break;
     }
@@ -2351,12 +2372,36 @@
     }
 
     case Primitive::kPrimFloat: {
-      __ subss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      if (second.IsFpuRegister()) {
+        __ subss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      } else if (sub->InputAt(1)->IsX86LoadFromConstantTable()) {
+        HX86LoadFromConstantTable* const_area = sub->InputAt(1)->AsX86LoadFromConstantTable();
+        DCHECK(!const_area->NeedsMaterialization());
+        __ subss(first.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralFloatAddress(
+                   const_area->GetConstant()->AsFloatConstant()->GetValue(),
+                   const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+      } else {
+        DCHECK(second.IsStackSlot());
+        __ subss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+      }
       break;
     }
 
     case Primitive::kPrimDouble: {
-      __ subsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      if (second.IsFpuRegister()) {
+        __ subsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      } else if (sub->InputAt(1)->IsX86LoadFromConstantTable()) {
+        HX86LoadFromConstantTable* const_area = sub->InputAt(1)->AsX86LoadFromConstantTable();
+        DCHECK(!const_area->NeedsMaterialization());
+        __ subsd(first.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralDoubleAddress(
+                     const_area->GetConstant()->AsDoubleConstant()->GetValue(),
+                     const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+      } else {
+        DCHECK(second.IsDoubleStackSlot());
+        __ subsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+      }
       break;
     }
 
@@ -2391,7 +2436,7 @@
     case Primitive::kPrimFloat:
     case Primitive::kPrimDouble: {
       locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::Any());
       locations->SetOut(Location::SameAsFirstInput());
       break;
     }
@@ -2507,12 +2552,38 @@
     }
 
     case Primitive::kPrimFloat: {
-      __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      DCHECK(first.Equals(locations->Out()));
+      if (second.IsFpuRegister()) {
+        __ mulss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      } else if (mul->InputAt(1)->IsX86LoadFromConstantTable()) {
+        HX86LoadFromConstantTable* const_area = mul->InputAt(1)->AsX86LoadFromConstantTable();
+        DCHECK(!const_area->NeedsMaterialization());
+        __ mulss(first.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralFloatAddress(
+                     const_area->GetConstant()->AsFloatConstant()->GetValue(),
+                     const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+      } else {
+        DCHECK(second.IsStackSlot());
+        __ mulss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+      }
       break;
     }
 
     case Primitive::kPrimDouble: {
-      __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      DCHECK(first.Equals(locations->Out()));
+      if (second.IsFpuRegister()) {
+        __ mulsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      } else if (mul->InputAt(1)->IsX86LoadFromConstantTable()) {
+        HX86LoadFromConstantTable* const_area = mul->InputAt(1)->AsX86LoadFromConstantTable();
+        DCHECK(!const_area->NeedsMaterialization());
+        __ mulsd(first.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralDoubleAddress(
+                     const_area->GetConstant()->AsDoubleConstant()->GetValue(),
+                     const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+      } else {
+        DCHECK(second.IsDoubleStackSlot());
+        __ mulsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+      }
       break;
     }
 
@@ -2855,7 +2926,7 @@
     case Primitive::kPrimFloat:
     case Primitive::kPrimDouble: {
       locations->SetInAt(0, Location::RequiresFpuRegister());
-      locations->SetInAt(1, Location::RequiresFpuRegister());
+      locations->SetInAt(1, Location::Any());
       locations->SetOut(Location::SameAsFirstInput());
       break;
     }
@@ -2867,7 +2938,6 @@
 
 void InstructionCodeGeneratorX86::VisitDiv(HDiv* div) {
   LocationSummary* locations = div->GetLocations();
-  Location out = locations->Out();
   Location first = locations->InAt(0);
   Location second = locations->InAt(1);
 
@@ -2879,14 +2949,36 @@
     }
 
     case Primitive::kPrimFloat: {
-      DCHECK(first.Equals(out));
-      __ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      if (second.IsFpuRegister()) {
+        __ divss(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      } else if (div->InputAt(1)->IsX86LoadFromConstantTable()) {
+        HX86LoadFromConstantTable* const_area = div->InputAt(1)->AsX86LoadFromConstantTable();
+        DCHECK(!const_area->NeedsMaterialization());
+        __ divss(first.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralFloatAddress(
+                   const_area->GetConstant()->AsFloatConstant()->GetValue(),
+                   const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+      } else {
+        DCHECK(second.IsStackSlot());
+        __ divss(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+      }
       break;
     }
 
     case Primitive::kPrimDouble: {
-      DCHECK(first.Equals(out));
-      __ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      if (second.IsFpuRegister()) {
+        __ divsd(first.AsFpuRegister<XmmRegister>(), second.AsFpuRegister<XmmRegister>());
+      } else if (div->InputAt(1)->IsX86LoadFromConstantTable()) {
+        HX86LoadFromConstantTable* const_area = div->InputAt(1)->AsX86LoadFromConstantTable();
+        DCHECK(!const_area->NeedsMaterialization());
+        __ divsd(first.AsFpuRegister<XmmRegister>(),
+                 codegen_->LiteralDoubleAddress(
+                   const_area->GetConstant()->AsDoubleConstant()->GetValue(),
+                   const_area->GetLocations()->InAt(0).AsRegister<Register>()));
+      } else {
+        DCHECK(second.IsDoubleStackSlot());
+        __ divsd(first.AsFpuRegister<XmmRegister>(), Address(ESP, second.GetStackIndex()));
+      }
       break;
     }
 
@@ -5085,6 +5177,245 @@
   // Will be generated at use site.
 }
 
+void LocationsBuilderX86::VisitX86ComputeBaseMethodAddress(
+    HX86ComputeBaseMethodAddress* insn) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+  locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitX86ComputeBaseMethodAddress(
+    HX86ComputeBaseMethodAddress* insn) {
+  LocationSummary* locations = insn->GetLocations();
+  Register reg = locations->Out().AsRegister<Register>();
+
+  // Generate call to next instruction.
+  Label next_instruction;
+  __ call(&next_instruction);
+  __ Bind(&next_instruction);
+
+  // Remember this offset for later use with the constant area.
+  codegen_->SetMethodAddressOffset(GetAssembler()->CodeSize());
+
+  // Grab the return address off the stack.
+  __ popl(reg);
+}
+
+void LocationsBuilderX86::VisitX86LoadFromConstantTable(
+    HX86LoadFromConstantTable* insn) {
+  LocationSummary* locations =
+      new (GetGraph()->GetArena()) LocationSummary(insn, LocationSummary::kNoCall);
+
+  locations->SetInAt(0, Location::RequiresRegister());
+  locations->SetInAt(1, Location::ConstantLocation(insn->GetConstant()));
+
+  // If the value doesn't need to be materialized, we only need the inputs to be set.
+  if (!insn->NeedsMaterialization()) {
+    return;
+  }
+
+  switch (insn->GetType()) {
+    case Primitive::kPrimFloat:
+    case Primitive::kPrimDouble:
+      locations->SetOut(Location::RequiresFpuRegister());
+      break;
+
+    case Primitive::kPrimInt:
+      locations->SetOut(Location::RequiresRegister());
+      break;
+
+    default:
+      LOG(FATAL) << "Unsupported x86 constant area type " << insn->GetType();
+  }
+}
+
+void InstructionCodeGeneratorX86::VisitX86LoadFromConstantTable(HX86LoadFromConstantTable* insn) {
+  if (!insn->NeedsMaterialization()) {
+    return;
+  }
+
+  LocationSummary* locations = insn->GetLocations();
+  Location out = locations->Out();
+  Register const_area = locations->InAt(0).AsRegister<Register>();
+  HConstant* value = insn->GetConstant();
+
+  switch (insn->GetType()) {
+    case Primitive::kPrimFloat:
+      __ movss(out.AsFpuRegister<XmmRegister>(),
+               codegen_->LiteralFloatAddress(value->AsFloatConstant()->GetValue(), const_area));
+      break;
+
+    case Primitive::kPrimDouble:
+      __ movsd(out.AsFpuRegister<XmmRegister>(),
+               codegen_->LiteralDoubleAddress(value->AsDoubleConstant()->GetValue(), const_area));
+      break;
+
+    case Primitive::kPrimInt:
+      __ movl(out.AsRegister<Register>(),
+              codegen_->LiteralInt32Address(value->AsIntConstant()->GetValue(), const_area));
+      break;
+
+    default:
+      LOG(FATAL) << "Unsupported x86 constant area type " << insn->GetType();
+  }
+}
+
+void CodeGeneratorX86::Finalize(CodeAllocator* allocator) {
+  // Generate the constant area if needed.
+  X86Assembler* assembler = GetAssembler();
+  if (!assembler->IsConstantAreaEmpty()) {
+    // Align to a 4-byte boundary to reduce cache misses, as the data consists
+    // of 4- and 8-byte values.
+    assembler->Align(4, 0);
+    constant_area_start_ = assembler->CodeSize();
+    assembler->AddConstantArea();
+  }
+
+  // And finish up.
+  CodeGenerator::Finalize(allocator);
+}
+
+/**
+ * Class to handle late fixup of offsets into the constant area.
+ */
+class RIPFixup : public AssemblerFixup, public ArenaObject<kArenaAllocMisc> {
+ public:
+  RIPFixup(const CodeGeneratorX86& codegen, int offset)
+      : codegen_(codegen), offset_into_constant_area_(offset) {}
+
+ private:
+  void Process(const MemoryRegion& region, int pos) OVERRIDE {
+    // Patch the correct offset for the instruction.  The place to patch is the
+    // last 4 bytes of the instruction.
+    // The value to patch is the displacement from the address computed by the
+    // HX86ComputeBaseMethodAddress instruction to the constant's location in
+    // the constant area.
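+    // For example (illustrative numbers): if the method address was taken at
+    // code offset 20, the constant area starts at offset 100, and the literal
+    // lives at offset 8 within it, the patched displacement is 100 + 8 - 20 = 88.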
+    int32_t constant_offset = codegen_.ConstantAreaStart() + offset_into_constant_area_;
+    int32_t relative_position = constant_offset - codegen_.GetMethodAddressOffset();
+
+    // Patch in the right value.
+    region.StoreUnaligned<int32_t>(pos - 4, relative_position);
+  }
+
+  const CodeGeneratorX86& codegen_;
+
+  // Location in constant area that the fixup refers to.
+  int offset_into_constant_area_;
+};
+
+Address CodeGeneratorX86::LiteralDoubleAddress(double v, Register reg) {
+  AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddDouble(v));
+  return Address(reg, kDummy32BitOffset, fixup);
+}
+
+Address CodeGeneratorX86::LiteralFloatAddress(float v, Register reg) {
+  AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddFloat(v));
+  return Address(reg, kDummy32BitOffset, fixup);
+}
+
+Address CodeGeneratorX86::LiteralInt32Address(int32_t v, Register reg) {
+  AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt32(v));
+  return Address(reg, kDummy32BitOffset, fixup);
+}
+
+Address CodeGeneratorX86::LiteralInt64Address(int64_t v, Register reg) {
+  AssemblerFixup* fixup = new (GetGraph()->GetArena()) RIPFixup(*this, __ AddInt64(v));
+  return Address(reg, kDummy32BitOffset, fixup);
+}
+
+/**
+ * Finds instructions that need the constant area base as an input.
+ */
+class ConstantHandlerVisitor : public HGraphVisitor {
+ public:
+  explicit ConstantHandlerVisitor(HGraph* graph) : HGraphVisitor(graph), base_(nullptr) {}
+
+ private:
+  void VisitAdd(HAdd* add) OVERRIDE {
+    BinaryFP(add);
+  }
+
+  void VisitSub(HSub* sub) OVERRIDE {
+    BinaryFP(sub);
+  }
+
+  void VisitMul(HMul* mul) OVERRIDE {
+    BinaryFP(mul);
+  }
+
+  void VisitDiv(HDiv* div) OVERRIDE {
+    BinaryFP(div);
+  }
+
+  void VisitReturn(HReturn* ret) OVERRIDE {
+    HConstant* value = ret->InputAt(0)->AsConstant();
+    if (value != nullptr && Primitive::IsFloatingPointType(value->GetType())) {
+      ReplaceInput(ret, value, 0, true);
+    }
+  }
+
+  void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitInvokeVirtual(HInvokeVirtual* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void VisitInvokeInterface(HInvokeInterface* invoke) OVERRIDE {
+    HandleInvoke(invoke);
+  }
+
+  void BinaryFP(HBinaryOperation* bin) {
+    HConstant* rhs = bin->InputAt(1)->AsConstant();
+    if (rhs != nullptr && Primitive::IsFloatingPointType(bin->GetResultType())) {
+      ReplaceInput(bin, rhs, 1, false);
+    }
+  }
+
+  void InitializeConstantAreaPointer(HInstruction* user) {
+    // Ensure we only initialize the pointer once.
+    if (base_ != nullptr) {
+      return;
+    }
+
+    HGraph* graph = GetGraph();
+    HBasicBlock* entry = graph->GetEntryBlock();
+    base_ = new (graph->GetArena()) HX86ComputeBaseMethodAddress();
+    HInstruction* insert_pos = (user->GetBlock() == entry) ? user : entry->GetLastInstruction();
+    entry->InsertInstructionBefore(base_, insert_pos);
+    DCHECK(base_ != nullptr);
+  }
+
+  void ReplaceInput(HInstruction* insn, HConstant* value, int input_index, bool materialize) {
+    InitializeConstantAreaPointer(insn);
+    HGraph* graph = GetGraph();
+    HBasicBlock* block = insn->GetBlock();
+    HX86LoadFromConstantTable* load_constant =
+        new (graph->GetArena()) HX86LoadFromConstantTable(base_, value, materialize);
+    block->InsertInstructionBefore(load_constant, insn);
+    insn->ReplaceInput(load_constant, input_index);
+  }
+
+  void HandleInvoke(HInvoke* invoke) {
+    // Ensure that we can load FP arguments from the constant area.
+    for (size_t i = 0, e = invoke->InputCount(); i < e; i++) {
+      HConstant* input = invoke->InputAt(i)->AsConstant();
+      if (input != nullptr && Primitive::IsFloatingPointType(input->GetType())) {
+        ReplaceInput(invoke, input, i, true);
+      }
+    }
+  }
+
+  // The HX86ComputeBaseMethodAddress generated in the entry block, needed as an
+  // input to the HX86LoadFromConstantTable instructions.
+  HX86ComputeBaseMethodAddress* base_;
+};
+
+void ConstantAreaFixups::Run() {
+  ConstantHandlerVisitor visitor(graph_);
+  visitor.VisitInsertionOrder();
+}
+
 #undef __
 
 }  // namespace x86
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 17787a8..edf289d 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -325,6 +325,25 @@
     return isa_features_;
   }
 
+  void SetMethodAddressOffset(int32_t offset) {
+    method_address_offset_ = offset;
+  }
+
+  int32_t GetMethodAddressOffset() const {
+    return method_address_offset_;
+  }
+
+  int32_t ConstantAreaStart() const {
+    return constant_area_start_;
+  }
+
+  Address LiteralDoubleAddress(double v, Register reg);
+  Address LiteralFloatAddress(float v, Register reg);
+  Address LiteralInt32Address(int32_t v, Register reg);
+  Address LiteralInt64Address(int64_t v, Register reg);
+
+  void Finalize(CodeAllocator* allocator) OVERRIDE;
+
  private:
   // Labels for each block that will be compiled.
   GrowableArray<Label> block_labels_;
@@ -339,6 +358,20 @@
   ArenaDeque<MethodPatchInfo<Label>> method_patches_;
   ArenaDeque<MethodPatchInfo<Label>> relative_call_patches_;
 
+  // Offset to the start of the constant area in the assembled code.
+  // Used for fixups to the constant area.
+  int32_t constant_area_start_;
+
+  // If there is a HX86ComputeBaseMethodAddress instruction in the graph
+  // (which shall be the sole instruction of this kind), subtracting this offset
+  // from the value contained in the out register of this HX86ComputeBaseMethodAddress
+  // instruction gives the address of the start of this method.
+  int32_t method_address_offset_;
+
+  // When we don't know the proper offset for the value, we use kDummy32BitOffset.
+  // The correct value will be inserted when processing Assembler fixups.
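+  // The value 256 does not fit in a signed 8-bit displacement, so the assembler
+  // emits a full 32-bit displacement, leaving 4 bytes for the fixup to patch.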
+  static constexpr int32_t kDummy32BitOffset = 256;
+
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86);
 };
 
diff --git a/compiler/optimizing/constant_area_fixups_x86.h b/compiler/optimizing/constant_area_fixups_x86.h
new file mode 100644
index 0000000..4138039
--- /dev/null
+++ b/compiler/optimizing/constant_area_fixups_x86.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
+#define ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
+
+#include "nodes.h"
+#include "optimization.h"
+
+namespace art {
+namespace x86 {
+
+class ConstantAreaFixups : public HOptimization {
+ public:
+  ConstantAreaFixups(HGraph* graph, OptimizingCompilerStats* stats)
+      : HOptimization(graph, "constant_area_fixups_x86", stats) {}
+
+  void Run() OVERRIDE;
+};
+
+}  // namespace x86
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_CONSTANT_AREA_FIXUPS_X86_H_
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index fef6f21..dc0a5df 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1076,7 +1076,9 @@
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_MIPS64(M)
 
-#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)
+#define FOR_EACH_CONCRETE_INSTRUCTION_X86(M)                            \
+  M(X86ComputeBaseMethodAddress, Instruction)                           \
+  M(X86LoadFromConstantTable, Instruction)
 
 #define FOR_EACH_CONCRETE_INSTRUCTION_X86_64(M)
 
@@ -4955,6 +4957,14 @@
   DISALLOW_COPY_AND_ASSIGN(HParallelMove);
 };
 
+}  // namespace art
+
+#ifdef ART_ENABLE_CODEGEN_x86
+#include "nodes_x86.h"
+#endif
+
+namespace art {
+
 class HGraphVisitor : public ValueObject {
  public:
   explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
new file mode 100644
index 0000000..ddc5730
--- /dev/null
+++ b/compiler/optimizing/nodes_x86.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_NODES_X86_H_
+#define ART_COMPILER_OPTIMIZING_NODES_X86_H_
+
+namespace art {
+
+// Compute the address of the method for X86 constant area support.
+class HX86ComputeBaseMethodAddress : public HExpression<0> {
+ public:
+  // Treat the value as an int32_t, but it is really a 32-bit native pointer.
+  HX86ComputeBaseMethodAddress() : HExpression(Primitive::kPrimInt, SideEffects::None()) {}
+
+  DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HX86ComputeBaseMethodAddress);
+};
+
+// Load a constant value from the constant table.
+class HX86LoadFromConstantTable : public HExpression<2> {
+ public:
+  HX86LoadFromConstantTable(HX86ComputeBaseMethodAddress* method_base,
+                            HConstant* constant,
+                            bool needs_materialization = true)
+      : HExpression(constant->GetType(), SideEffects::None()),
+        needs_materialization_(needs_materialization) {
+    SetRawInputAt(0, method_base);
+    SetRawInputAt(1, constant);
+  }
+
+  bool NeedsMaterialization() const { return needs_materialization_; }
+
+  HX86ComputeBaseMethodAddress* GetBaseMethodAddress() const {
+    return InputAt(0)->AsX86ComputeBaseMethodAddress();
+  }
+
+  HConstant* GetConstant() const {
+    return InputAt(1)->AsConstant();
+  }
+
+  DECLARE_INSTRUCTION(X86LoadFromConstantTable);
+
+ private:
+  const bool needs_materialization_;
+
+  DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
+};
+
+}  // namespace art
+
+#endif  // ART_COMPILER_OPTIMIZING_NODES_X86_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 91b03d4..f549ba8 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -23,6 +23,10 @@
 #include "instruction_simplifier_arm64.h"
 #endif
 
+#ifdef ART_ENABLE_CODEGEN_x86
+#include "constant_area_fixups_x86.h"
+#endif
+
 #include "art_method-inl.h"
 #include "base/arena_allocator.h"
 #include "base/arena_containers.h"
@@ -424,6 +428,17 @@
       break;
     }
 #endif
+#ifdef ART_ENABLE_CODEGEN_x86
+    case kX86: {
+      x86::ConstantAreaFixups* constant_area_fixups =
+          new (arena) x86::ConstantAreaFixups(graph, stats);
+      HOptimization* x86_optimizations[] = {
+        constant_area_fixups
+      };
+      RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
+      break;
+    }
+#endif
     default:
       break;
   }
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index a03f857..e3962b4 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1750,6 +1750,10 @@
   for (int i = 1; i < length; i++) {
     EmitUint8(operand.encoding_[i]);
   }
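+  // Register any fixup attached to this operand so that the displacement it
+  // covers can be patched once the final code layout is known.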
+  AssemblerFixup* fixup = operand.GetFixup();
+  if (fixup != nullptr) {
+    EmitFixup(fixup);
+  }
 }
 
 
@@ -2322,5 +2326,56 @@
 #undef __
 }
 
+void X86Assembler::AddConstantArea() {
+  const std::vector<int32_t>& area = constant_area_.GetBuffer();
+  // Generate the data for the literal area.
+  for (size_t i = 0, e = area.size(); i < e; i++) {
+    AssemblerBuffer::EnsureCapacity ensured(&buffer_);
+    EmitInt32(area[i]);
+  }
+}
+
+int ConstantArea::AddInt32(int32_t v) {
+  for (size_t i = 0, e = buffer_.size(); i < e; i++) {
+    if (v == buffer_[i]) {
+      return i * kEntrySize;
+    }
+  }
+
+  // Didn't match anything.
+  int result = buffer_.size() * kEntrySize;
+  buffer_.push_back(v);
+  return result;
+}
+
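+// Note that int64_t values occupy two consecutive int32_t entries (low word
+// first), so, for example, the first int64_t added lands at offset 0 and the
+// next distinct value at offset 8.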
+int ConstantArea::AddInt64(int64_t v) {
+  int32_t v_low = Low32Bits(v);
+  int32_t v_high = High32Bits(v);
+  if (buffer_.size() > 1) {
+    // Ensure we don't read past the end of the buffer.
+    for (size_t i = 0, e = buffer_.size() - 1; i < e; i++) {
+      if (v_low == buffer_[i] && v_high == buffer_[i + 1]) {
+        return i * kEntrySize;
+      }
+    }
+  }
+
+  // Didn't match anything.
+  int result = buffer_.size() * kEntrySize;
+  buffer_.push_back(v_low);
+  buffer_.push_back(v_high);
+  return result;
+}
+
+int ConstantArea::AddDouble(double v) {
+  // Treat the value as a 64-bit integer value.
+  return AddInt64(bit_cast<int64_t, double>(v));
+}
+
+int ConstantArea::AddFloat(float v) {
+  // Treat the value as a 32-bit integer value.
+  return AddInt32(bit_cast<int32_t, float>(v));
+}
+
 }  // namespace x86
 }  // namespace art
diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h
index 0c90f28..7d7b3d3 100644
--- a/compiler/utils/x86/assembler_x86.h
+++ b/compiler/utils/x86/assembler_x86.h
@@ -86,7 +86,7 @@
 
  protected:
   // Operand can be sub classed (e.g: Address).
-  Operand() : length_(0) { }
+  Operand() : length_(0), fixup_(nullptr) { }
 
   void SetModRM(int mod_in, Register rm_in) {
     CHECK_EQ(mod_in & ~3, 0);
@@ -113,11 +113,23 @@
     length_ += disp_size;
   }
 
+  AssemblerFixup* GetFixup() const {
+    return fixup_;
+  }
+
+  void SetFixup(AssemblerFixup* fixup) {
+    fixup_ = fixup;
+  }
+
  private:
   uint8_t length_;
   uint8_t encoding_[6];
 
-  explicit Operand(Register reg) { SetModRM(3, reg); }
+  // A fixup can be associated with the operand, in order to be applied after the
+  // code has been generated. This is used for constant area fixups.
+  AssemblerFixup* fixup_;
+
+  explicit Operand(Register reg) : fixup_(nullptr) { SetModRM(3, reg); }
 
   // Get the operand encoding byte at the given index.
   uint8_t encoding_at(int index_in) const {
@@ -136,6 +148,11 @@
     Init(base_in, disp);
   }
 
+  Address(Register base_in, int32_t disp, AssemblerFixup* fixup) {
+    Init(base_in, disp);
+    SetFixup(fixup);
+  }
+
   Address(Register base_in, Offset disp) {
     Init(base_in, disp.Int32Value());
   }
@@ -226,6 +243,50 @@
   DISALLOW_COPY_AND_ASSIGN(NearLabel);
 };
 
+/**
+ * Class to handle constant area values.
+ */
+class ConstantArea {
+ public:
+  ConstantArea() {}
+
+  // Add a double to the constant area, returning the offset into
+  // the constant area where the literal resides.
+  int AddDouble(double v);
+
+  // Add a float to the constant area, returning the offset into
+  // the constant area where the literal resides.
+  int AddFloat(float v);
+
+  // Add an int32_t to the constant area, returning the offset into
+  // the constant area where the literal resides.
+  int AddInt32(int32_t v);
+
+  // Add an int64_t to the constant area, returning the offset into
+  // the constant area where the literal resides.
+  int AddInt64(int64_t v);
+
+  bool IsEmpty() const {
+    return buffer_.empty();
+  }
+
+  const std::vector<int32_t>& GetBuffer() const {
+    return buffer_;
+  }
+
+  void AddFixup(AssemblerFixup* fixup) {
+    fixups_.push_back(fixup);
+  }
+
+  const std::vector<AssemblerFixup*>& GetFixups() const {
+    return fixups_;
+  }
+
+ private:
+  static constexpr size_t kEntrySize = sizeof(int32_t);
+  std::vector<int32_t> buffer_;
+  std::vector<AssemblerFixup*> fixups_;
+};
 
 class X86Assembler FINAL : public Assembler {
  public:
@@ -667,6 +728,29 @@
     }
   }
 
+  // Add a double to the constant area, returning the offset into
+  // the constant area where the literal resides.
+  int AddDouble(double v) { return constant_area_.AddDouble(v); }
+
+  // Add a float to the constant area, returning the offset into
+  // the constant area where the literal resides.
+  int AddFloat(float v)   { return constant_area_.AddFloat(v); }
+
+  // Add an int32_t to the constant area, returning the offset into
+  // the constant area where the literal resides.
+  int AddInt32(int32_t v) { return constant_area_.AddInt32(v); }
+
+  // Add an int64_t to the constant area, returning the offset into
+  // the constant area where the literal resides.
+  int AddInt64(int64_t v) { return constant_area_.AddInt64(v); }
+
+  // Add the contents of the constant area to the assembler buffer.
+  void AddConstantArea();
+
+  // Is the constant area empty? Return true if there are no literals in the constant area.
+  bool IsConstantAreaEmpty() const { return constant_area_.IsEmpty(); }
+  void AddConstantAreaFixup(AssemblerFixup* fixup) { constant_area_.AddFixup(fixup); }
+
  private:
   inline void EmitUint8(uint8_t value);
   inline void EmitInt32(int32_t value);
@@ -685,6 +769,8 @@
   void EmitGenericShift(int rm, const Operand& operand, const Immediate& imm);
   void EmitGenericShift(int rm, const Operand& operand, Register shifter);
 
+  ConstantArea constant_area_;
+
   DISALLOW_COPY_AND_ASSIGN(X86Assembler);
 };