Version 3.30.3 (based on bleeding_edge revision r24397)

Removed the Isolate* field from literal nodes (Chromium issue 417697).

Squeeze the layout of expression nodes a bit (Chromium issue 417697).

Merged FeedbackSlotInterface into AstNode, removing the need for a 2nd vtable (Chromium issue 417697).

Extend the CPU profiler to map ticks to source lines.

Remove support for parallel sweeping.

Introduce v8::Object::GetIsolate().

Performance and stability improvements on all platforms.

git-svn-id: https://v8.googlecode.com/svn/trunk@24398 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 36ab7b4..95b1f27 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,21 @@
+2014-10-03: Version 3.30.3
+
+        Removed the Isolate* field from literal nodes (Chromium issue 417697).
+
+        Squeeze the layout of expression nodes a bit (Chromium issue 417697).
+
+        Merged FeedbackSlotInterface into AstNode, removing the need for a 2nd
+        vtable (Chromium issue 417697).
+
+        Extend the CPU profiler to map ticks to source lines.
+
+        Remove support for parallel sweeping.
+
+        Introduce v8::Object::GetIsolate().
+
+        Performance and stability improvements on all platforms.
+
+
 2014-10-02: Version 3.30.2
 
         Fix Hydrogen's BuildStore() (Chromium issue 417508).
diff --git a/include/v8.h b/include/v8.h
index 274dedb..2dc1bd8 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -2586,6 +2586,11 @@
    */
   Local<Value> CallAsConstructor(int argc, Handle<Value> argv[]);
 
+  /**
+   * Returns the isolate to which the Object belongs.
+   */
+  Isolate* GetIsolate();
+
   static Local<Object> New(Isolate* isolate);
 
   V8_INLINE static Object* Cast(Value* obj);
@@ -4913,6 +4918,9 @@
    * On Win64, embedders are advised to install function table callbacks for
    * these ranges, as default SEH won't be able to unwind through jitted code.
    *
+   * The first page of the code range is reserved for the embedder and is
+   * committed, writable, and executable.
+   *
    * Might be empty on other platforms.
    *
    * https://code.google.com/p/v8/issues/detail?id=3598
@@ -5904,7 +5912,7 @@
   static const int kNullValueRootIndex = 7;
   static const int kTrueValueRootIndex = 8;
   static const int kFalseValueRootIndex = 9;
-  static const int kEmptyStringRootIndex = 152;
+  static const int kEmptyStringRootIndex = 153;
 
   // The external allocation limit should be below 256 MB on all architectures
   // to avoid that resource-constrained embedders run low on memory.
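
The new accessor spares embedders from threading an Isolate* through code
that already holds an object handle. A minimal usage sketch against the
3.30-era API (the callback name and the "ready" property are hypothetical,
not part of this patch):

  // Sketch only: recover the isolate from the object itself.
  void OnObjectReady(v8::Local<v8::Object> obj) {
    v8::Isolate* isolate = obj->GetIsolate();  // Added in this version.
    v8::HandleScope scope(isolate);
    obj->Set(v8::String::NewFromUtf8(isolate, "ready"), v8::True(isolate));
  }
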
diff --git a/src/accessors.cc b/src/accessors.cc
index 011372c..9bd6e5b 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -56,17 +56,6 @@
 }
 
 
-template <class C>
-static C* FindInstanceOf(Isolate* isolate, Object* obj) {
-  for (PrototypeIterator iter(isolate, obj,
-                              PrototypeIterator::START_AT_RECEIVER);
-       !iter.IsAtEnd(); iter.Advance()) {
-    if (Is<C>(iter.GetCurrent())) return C::cast(iter.GetCurrent());
-  }
-  return NULL;
-}
-
-
 static V8_INLINE bool CheckForName(Handle<Name> name,
                                    Handle<String> property_name,
                                    int offset,
@@ -916,11 +905,6 @@
 }
 
 
-Handle<Object> Accessors::FunctionGetPrototype(Handle<JSFunction> function) {
-  return GetFunctionPrototype(function->GetIsolate(), function);
-}
-
-
 Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
                                                Handle<Object> prototype) {
   DCHECK(function->should_have_prototype());
diff --git a/src/accessors.h b/src/accessors.h
index 8fc1f84..1677c1d 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -68,7 +68,6 @@
   // Accessor functions called directly from the runtime system.
   static Handle<Object> FunctionSetPrototype(Handle<JSFunction> object,
                                              Handle<Object> value);
-  static Handle<Object> FunctionGetPrototype(Handle<JSFunction> object);
   static Handle<Object> FunctionGetArguments(Handle<JSFunction> object);
 
   // Accessor infos.
diff --git a/src/api.cc b/src/api.cc
index f70d0e0..512eaeb 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -5608,6 +5608,12 @@
 }
 
 
+Isolate* v8::Object::GetIsolate() {
+  i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+  return reinterpret_cast<Isolate*>(i_isolate);
+}
+
+
 Local<v8::Object> v8::Object::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   LOG_API(i_isolate, "Object::New");
diff --git a/src/ast-value-factory.cc b/src/ast-value-factory.cc
index ea8474f..4df6ac0 100644
--- a/src/ast-value-factory.cc
+++ b/src/ast-value-factory.cc
@@ -117,14 +117,15 @@
 
 
 bool AstRawString::Compare(void* a, void* b) {
-  AstRawString* string1 = reinterpret_cast<AstRawString*>(a);
-  AstRawString* string2 = reinterpret_cast<AstRawString*>(b);
-  if (string1->is_one_byte_ != string2->is_one_byte_) return false;
-  if (string1->hash_ != string2->hash_) return false;
-  int length = string1->literal_bytes_.length();
-  if (string2->literal_bytes_.length() != length) return false;
-  return memcmp(string1->literal_bytes_.start(),
-                string2->literal_bytes_.start(), length) == 0;
+  return *static_cast<AstRawString*>(a) == *static_cast<AstRawString*>(b);
+}
+
+bool AstRawString::operator==(const AstRawString& rhs) const {
+  if (is_one_byte_ != rhs.is_one_byte_) return false;
+  if (hash_ != rhs.hash_) return false;
+  int len = literal_bytes_.length();
+  if (rhs.literal_bytes_.length() != len) return false;
+  return memcmp(literal_bytes_.start(), rhs.literal_bytes_.start(), len) == 0;
 }
 
 
diff --git a/src/ast-value-factory.h b/src/ast-value-factory.h
index 2f84163..82e6e6b 100644
--- a/src/ast-value-factory.h
+++ b/src/ast-value-factory.h
@@ -94,6 +94,8 @@
   }
   static bool Compare(void* a, void* b);
 
+  bool operator==(const AstRawString& rhs) const;
+
  private:
   friend class AstValueFactory;
   friend class AstRawStringInternalizationKey;
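
The void*-based Compare shim now simply adapts the new operator== to the
hash-map callback signature. A standalone analogue (not V8 code) showing
the comparison order, cheapest rejections first:

  #include <cstdint>
  #include <cstring>

  struct RawString {                    // analogue of AstRawString
    bool is_one_byte;
    uint32_t hash;                      // precomputed string hash
    const char* bytes;
    int length;
    bool operator==(const RawString& rhs) const {
      if (is_one_byte != rhs.is_one_byte) return false;  // cheapest test
      if (hash != rhs.hash) return false;                // rejects most pairs
      if (length != rhs.length) return false;
      return memcmp(bytes, rhs.bytes, length) == 0;      // full scan last
    }
  };

  // Adapter with the bool(void*, void*) shape the hash map expects.
  static bool Compare(void* a, void* b) {
    return *static_cast<RawString*>(a) == *static_cast<RawString*>(b);
  }
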
diff --git a/src/ast.cc b/src/ast.cc
index 3e86551..2d96f3f 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -560,7 +560,7 @@
 // once we use the common type field in the AST consistently.
 
 void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
-  to_boolean_types_ = oracle->ToBooleanTypes(test_id());
+  set_to_boolean_types(oracle->ToBooleanTypes(test_id()));
 }
 
 
@@ -1126,20 +1126,19 @@
 #undef DONT_CACHE_NODE
 
 
-Handle<String> Literal::ToString() {
-  if (value_->IsString()) return value_->AsString()->string();
-  DCHECK(value_->IsNumber());
-  char arr[100];
-  Vector<char> buffer(arr, arraysize(arr));
-  const char* str;
-  if (value()->IsSmi()) {
-    // Optimization only, the heap number case would subsume this.
-    SNPrintF(buffer, "%d", Smi::cast(*value())->value());
-    str = arr;
-  } else {
-    str = DoubleToCString(value()->Number(), buffer);
-  }
-  return isolate_->factory()->NewStringFromAsciiChecked(str);
+uint32_t Literal::Hash() {
+  return raw_value()->IsString()
+             ? raw_value()->AsString()->hash()
+             : ComputeLongHash(double_to_uint64(raw_value()->AsNumber()));
+}
+
+
+// static
+bool Literal::Match(void* literal1, void* literal2) {
+  const AstValue* x = static_cast<Literal*>(literal1)->raw_value();
+  const AstValue* y = static_cast<Literal*>(literal2)->raw_value();
+  return (x->IsString() && y->IsString() && *x->AsString() == *y->AsString()) ||
+         (x->IsNumber() && y->IsNumber() && x->AsNumber() == y->AsNumber());
 }
 
 
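
Literal::Hash no longer materializes a heap string: string literals reuse
the AST string's precomputed hash, and number literals hash their raw
IEEE-754 bit pattern. A standalone sketch of the number path (the mixer
below follows Thomas Wang's 64-to-32-bit mix, which V8's ComputeLongHash
is based on; the exact constants are an assumption here):

  #include <cstdint>
  #include <cstring>

  static uint64_t DoubleToUint64(double d) {  // analogue of double_to_uint64
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);           // safe type-pun via memcpy
    return bits;
  }

  static uint32_t LongHash(uint64_t h) {      // 64 -> 32 bit finalizer
    h = ~h + (h << 18);
    h ^= h >> 31;
    h *= 21;
    h ^= h >> 11;
    h += h << 6;
    h ^= h >> 22;
    return static_cast<uint32_t>(h);
  }

  uint32_t NumberLiteralHash(double value) {
    return LongHash(DoubleToUint64(value));
  }
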
diff --git a/src/ast.h b/src/ast.h
index f695e12..5ef4713 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -11,7 +11,6 @@
 #include "src/ast-value-factory.h"
 #include "src/bailout-reason.h"
 #include "src/factory.h"
-#include "src/feedback-slots.h"
 #include "src/interface.h"
 #include "src/isolate.h"
 #include "src/jsregexp.h"
@@ -233,6 +232,17 @@
   virtual IterationStatement* AsIterationStatement() { return NULL; }
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
 
+  // The interface for feedback slots, with default no-op implementations
+  // for node types that don't actually have feedback slots. Conceptually
+  // this is not very clean, but multiple inheritance would introduce yet
+  // another vtable pointer per node object, something we don't want for
+  // space reasons.
+  static const int kInvalidFeedbackSlot = -1;
+  virtual int ComputeFeedbackSlotCount() {
+    UNREACHABLE();
+    return 0;
+  }
+  virtual void SetFirstFeedbackSlot(int slot) { UNREACHABLE(); }
+
  protected:
   // Some nodes re-use bailout IDs for type feedback.
   static TypeFeedbackId reuse(BailoutId id) {
@@ -353,9 +363,12 @@
   void set_bounds(Bounds bounds) { bounds_ = bounds; }
 
   // Whether the expression is parenthesized
-  unsigned parenthesization_level() const { return parenthesization_level_; }
-  bool is_parenthesized() const { return parenthesization_level_ > 0; }
-  void increase_parenthesization_level() { ++parenthesization_level_; }
+  bool is_parenthesized() const { return is_parenthesized_; }
+  bool is_multi_parenthesized() const { return is_multi_parenthesized_; }
+  void increase_parenthesization_level() {
+    is_multi_parenthesized_ = is_parenthesized_;
+    is_parenthesized_ = true;
+  }
 
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
@@ -381,16 +394,18 @@
  protected:
   Expression(Zone* zone, int pos, IdGen* id_gen)
       : AstNode(pos),
+        is_parenthesized_(false),
+        is_multi_parenthesized_(false),
         bounds_(Bounds::Unbounded(zone)),
-        parenthesization_level_(0),
         id_(id_gen->GetNextId()),
         test_id_(id_gen->GetNextId()) {}
   void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
 
  private:
-  Bounds bounds_;
   byte to_boolean_types_;
-  unsigned parenthesization_level_;
+  bool is_parenthesized_ : 1;
+  bool is_multi_parenthesized_ : 1;
+  Bounds bounds_;
 
   const BailoutId id_;
   const TypeFeedbackId test_id_;
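
The unsigned parenthesization counter is squeezed down to two bits, since
callers only need to distinguish "parenthesized at least once" from "at
least twice". A sketch of the encoding (standalone, not V8 code):

  struct ParenState {
    bool is_parenthesized = false;        // wrapped at least once
    bool is_multi_parenthesized = false;  // wrapped at least twice
    void IncreaseLevel() {
      is_multi_parenthesized = is_parenthesized;
      is_parenthesized = true;
    }
  };
  // After one call: (true, false).  After two or more: (true, true).
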
@@ -914,8 +929,7 @@
 };
 
 
-class ForInStatement FINAL : public ForEachStatement,
-    public FeedbackSlotInterface {
+class ForInStatement FINAL : public ForEachStatement {
  public:
   DECLARE_NODE_TYPE(ForInStatement)
 
@@ -1371,28 +1385,17 @@
 
   // Support for using Literal as a HashMap key. NOTE: Currently, this works
   // only for string and number literals!
-  uint32_t Hash() { return ToString()->Hash(); }
-
-  static bool Match(void* literal1, void* literal2) {
-    Handle<String> s1 = static_cast<Literal*>(literal1)->ToString();
-    Handle<String> s2 = static_cast<Literal*>(literal2)->ToString();
-    return String::Equals(s1, s2);
-  }
+  uint32_t Hash();
+  static bool Match(void* literal1, void* literal2);
 
   TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
 
  protected:
   Literal(Zone* zone, const AstValue* value, int position, IdGen* id_gen)
-      : Expression(zone, position, id_gen),
-        value_(value),
-        isolate_(zone->isolate()) {}
+      : Expression(zone, position, id_gen), value_(value) {}
 
  private:
-  Handle<String> ToString();
-
   const AstValue* value_;
-  // TODO(dcarney): remove.  this is only needed for Match and Hash.
-  Isolate* isolate_;
 };
 
 
@@ -1628,7 +1631,7 @@
 };
 
 
-class VariableProxy FINAL : public Expression, public FeedbackSlotInterface {
+class VariableProxy FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(VariableProxy)
 
@@ -1672,7 +1675,7 @@
 };
 
 
-class Property FINAL : public Expression, public FeedbackSlotInterface {
+class Property FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(Property)
 
@@ -1741,7 +1744,7 @@
 };
 
 
-class Call FINAL : public Expression, public FeedbackSlotInterface {
+class Call FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(Call)
 
@@ -1844,7 +1847,7 @@
 };
 
 
-class CallNew FINAL : public Expression, public FeedbackSlotInterface {
+class CallNew FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(CallNew)
 
@@ -1907,7 +1910,7 @@
 // language construct. Instead it is used to call a C or JS function
 // with a set of arguments. This is used from the builtins that are
 // implemented in JavaScript (see "v8natives.js").
-class CallRuntime FINAL : public Expression, public FeedbackSlotInterface {
+class CallRuntime FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(CallRuntime)
 
@@ -2223,7 +2226,7 @@
 };
 
 
-class Yield FINAL : public Expression, public FeedbackSlotInterface {
+class Yield FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(Yield)
 
@@ -3038,7 +3041,7 @@
     dont_turbofan_reason_ = reason;
   }
 
-  void add_slot_node(FeedbackSlotInterface* slot_node) {
+  void add_slot_node(AstNode* slot_node) {
     int count = slot_node->ComputeFeedbackSlotCount();
     if (count > 0) {
       slot_node->SetFirstFeedbackSlot(properties_.feedback_slots());
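
With FeedbackSlotInterface gone, add_slot_node takes a plain AstNode* and
hands each slot-carrying node a block of consecutive slot indices. A
standalone sketch of that allocation discipline (reusing the Node type
from the sketch above):

  int feedback_slots = 0;  // analogue of properties_.feedback_slots()

  void AddSlotNode(Node* node) {
    int count = node->ComputeFeedbackSlotCount();
    if (count > 0) {
      node->SetFirstFeedbackSlot(feedback_slots);  // start of its block
      feedback_slots += count;                     // reserve the block
    }
  }
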
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 250562a..405f665 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -57,6 +57,8 @@
     Handle<String> source_code = isolate_->factory()
                                      ->NewExternalStringFromOneByte(resource)
                                      .ToHandleChecked();
+    // Mark this external string with a special map.
+    source_code->set_map(isolate_->heap()->native_source_string_map());
     heap->natives_source_cache()->set(index, *source_code);
   }
   Handle<Object> cached_source(heap->natives_source_cache()->get(index),
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index b3bb4f3..7515084 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -271,8 +271,6 @@
   }
   CodeStubGraphBuilder<Stub> builder(isolate, stub);
   LChunk* chunk = OptimizeGraph(builder.CreateGraph());
-  // TODO(yangguo) remove this once the code serializer handles code stubs.
-  if (FLAG_serialize_toplevel) chunk->info()->PrepareForSerializing();
   Handle<Code> code = chunk->Codegen();
   if (FLAG_profile_hydrogen_code_stub_compilation) {
     OFStream os(stdout);
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 5a07cdd..195b315 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -105,9 +105,6 @@
   // Generate the new code.
   MacroAssembler masm(isolate(), NULL, 256);
 
-  // TODO(yangguo) remove this once the code serializer handles code stubs.
-  if (FLAG_serialize_toplevel) masm.enable_serializer();
-
   {
     // Update the static counter each time a new code stub is generated.
     isolate()->counters()->code_stubs()->Increment();
@@ -224,9 +221,8 @@
     CODE_STUB_LIST(DEF_CASE)
 #undef DEF_CASE
     case NUMBER_OF_IDS:
-      UNREACHABLE();
     case NoCache:
-      *value_out = NULL;
+      UNREACHABLE();
       break;
   }
 }
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index 7f3b084..959c60a 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -11,43 +11,43 @@
 
 // static
 FieldAccess AccessBuilder::ForMap() {
-  return {kTaggedBase, HeapObject::kMapOffset, Handle<Name>(), Type::Any(),
+  return {kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(), Type::Any(),
           kMachAnyTagged};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectProperties() {
-  return {kTaggedBase, JSObject::kPropertiesOffset, Handle<Name>(), Type::Any(),
-          kMachAnyTagged};
+  return {kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
+          Type::Any(), kMachAnyTagged};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectElements() {
-  return {kTaggedBase, JSObject::kElementsOffset, Handle<Name>(),
+  return {kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
           Type::Internal(), kMachAnyTagged};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionContext() {
-  return {kTaggedBase, JSFunction::kContextOffset, Handle<Name>(),
+  return {kTaggedBase, JSFunction::kContextOffset, MaybeHandle<Name>(),
           Type::Internal(), kMachAnyTagged};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
-  return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, Handle<Name>(),
+  return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, MaybeHandle<Name>(),
           Type::UntaggedPtr(), kMachPtr};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForExternalArrayPointer() {
-  return {kTaggedBase, ExternalArray::kExternalPointerOffset, Handle<Name>(),
-          Type::UntaggedPtr(), kMachPtr};
+  return {kTaggedBase, ExternalArray::kExternalPointerOffset,
+          MaybeHandle<Name>(), Type::UntaggedPtr(), kMachPtr};
 }
 
 
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 2b51b6f..83a8630 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -33,8 +33,6 @@
 
   Operand OutputOperand() { return ToOperand(instr_->Output()); }
 
-  Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
-
   Operand ToOperand(InstructionOperand* op, int extra = 0) {
     if (op->IsRegister()) {
       DCHECK(extra == 0);
@@ -283,30 +281,30 @@
       break;
     case kIA32Shl:
       if (HasImmediateInput(instr, 1)) {
-        __ shl(i.OutputRegister(), i.InputInt5(1));
+        __ shl(i.OutputOperand(), i.InputInt5(1));
       } else {
-        __ shl_cl(i.OutputRegister());
+        __ shl_cl(i.OutputOperand());
       }
       break;
     case kIA32Shr:
       if (HasImmediateInput(instr, 1)) {
-        __ shr(i.OutputRegister(), i.InputInt5(1));
+        __ shr(i.OutputOperand(), i.InputInt5(1));
       } else {
-        __ shr_cl(i.OutputRegister());
+        __ shr_cl(i.OutputOperand());
       }
       break;
     case kIA32Sar:
       if (HasImmediateInput(instr, 1)) {
-        __ sar(i.OutputRegister(), i.InputInt5(1));
+        __ sar(i.OutputOperand(), i.InputInt5(1));
       } else {
-        __ sar_cl(i.OutputRegister());
+        __ sar_cl(i.OutputOperand());
       }
       break;
     case kIA32Ror:
       if (HasImmediateInput(instr, 1)) {
-        __ ror(i.OutputRegister(), i.InputInt5(1));
+        __ ror(i.OutputOperand(), i.InputInt5(1));
       } else {
-        __ ror_cl(i.OutputRegister());
+        __ ror_cl(i.OutputOperand());
       }
       break;
     case kSSEFloat64Cmp:
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 4ee05d2..dee53c9 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -329,9 +329,8 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // TODO(turbofan): assembler only supports some addressing modes for shifts.
   if (g.CanBeImmediate(right)) {
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
                    g.UseImmediate(right));
   } else {
     Int32BinopMatcher m(node);
@@ -341,7 +340,7 @@
         right = mright.left().node();
       }
     }
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
                    g.UseFixed(right, ecx));
   }
 }
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index fd7c530..3bd12fe 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -13,6 +13,8 @@
 #include "src/compiler/arm64/instruction-codes-arm64.h"
 #elif V8_TARGET_ARCH_IA32
 #include "src/compiler/ia32/instruction-codes-ia32.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/compiler/mips/instruction-codes-mips.h"
 #elif V8_TARGET_ARCH_X64
 #include "src/compiler/x64/instruction-codes-x64.h"
 #else
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index 749fee5..594bfd3 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -533,35 +533,34 @@
   Type* key_type = NodeProperties::GetBounds(key).upper;
   Type* base_type = NodeProperties::GetBounds(base).upper;
   // TODO(mstarzinger): This lowering is not correct if:
-  //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
-  //   b) The typed array or it's buffer is neutered.
-  //   c) The index is out of bounds.
+  //   a) The typed array or its buffer is neutered.
+  //   b) The index is out of bounds.
   if (base_type->IsConstant() && key_type->Is(Type::Integral32()) &&
       base_type->AsConstant()->Value()->IsJSTypedArray()) {
     // JSLoadProperty(typed-array, int32)
-    JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
-    ElementsKind elements_kind = array->map()->elements_kind();
-    ExternalArrayType type = array->type();
-    uint32_t length;
-    CHECK(array->length()->ToUint32(&length));
-    ElementAccess element_access;
-    Node* elements = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
-        NodeProperties::GetEffectInput(node));
-    if (IsExternalArrayElementsKind(elements_kind)) {
-      elements = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
-          elements, NodeProperties::GetEffectInput(node));
-      element_access = AccessBuilder::ForTypedArrayElement(type, true);
-    } else {
-      DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
-      element_access = AccessBuilder::ForTypedArrayElement(type, false);
+    Handle<JSTypedArray> array =
+        Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
+    if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
+      Handle<JSArrayBuffer> buffer =
+          handle(JSArrayBuffer::cast(array->buffer()));
+      ExternalArrayType type = array->type();
+      uint32_t length;
+      CHECK(array->length()->ToUint32(&length));
+      Node* elements =
+          graph()->NewNode(simplified()->LoadField(
+                               AccessBuilder::ForJSArrayBufferBackingStore()),
+                           jsgraph()->HeapConstant(buffer), graph()->start());
+      Node* effect = NodeProperties::GetEffectInput(node);
+      Node* control = NodeProperties::GetControlInput(node);
+      node->set_op(simplified()->LoadElement(
+          AccessBuilder::ForTypedArrayElement(type, true)));
+      node->ReplaceInput(0, elements);
+      node->ReplaceInput(2, jsgraph()->Uint32Constant(length));
+      node->ReplaceInput(3, effect);
+      node->ReplaceInput(4, control);
+      node->TrimInputCount(5);
+      return Changed(node);
     }
-    Node* value = graph()->NewNode(
-        simplified()->LoadElement(element_access), elements, key,
-        jsgraph()->Uint32Constant(length), NodeProperties::GetEffectInput(node),
-        NodeProperties::GetControlInput(node));
-    return ReplaceEagerly(node, value);
   }
   return NoChange();
 }
@@ -574,35 +573,34 @@
   Type* key_type = NodeProperties::GetBounds(key).upper;
   Type* base_type = NodeProperties::GetBounds(base).upper;
   // TODO(mstarzinger): This lowering is not correct if:
-  //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
-  //   b) The typed array or its buffer is neutered.
+  //   a) The typed array or its buffer is neutered.
   if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
       base_type->AsConstant()->Value()->IsJSTypedArray()) {
     // JSStoreProperty(typed-array, int32, value)
-    JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
-    ElementsKind elements_kind = array->map()->elements_kind();
-    ExternalArrayType type = array->type();
-    uint32_t length;
-    CHECK(array->length()->ToUint32(&length));
-    ElementAccess element_access;
-    Node* elements = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
-        NodeProperties::GetEffectInput(node));
-    if (IsExternalArrayElementsKind(elements_kind)) {
-      elements = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
-          elements, NodeProperties::GetEffectInput(node));
-      element_access = AccessBuilder::ForTypedArrayElement(type, true);
-    } else {
-      DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
-      element_access = AccessBuilder::ForTypedArrayElement(type, false);
+    Handle<JSTypedArray> array =
+        Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
+    if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
+      Handle<JSArrayBuffer> buffer =
+          handle(JSArrayBuffer::cast(array->buffer()));
+      ExternalArrayType type = array->type();
+      uint32_t length;
+      CHECK(array->length()->ToUint32(&length));
+      Node* elements =
+          graph()->NewNode(simplified()->LoadField(
+                               AccessBuilder::ForJSArrayBufferBackingStore()),
+                           jsgraph()->HeapConstant(buffer), graph()->start());
+      Node* effect = NodeProperties::GetEffectInput(node);
+      Node* control = NodeProperties::GetControlInput(node);
+      node->set_op(simplified()->StoreElement(
+          AccessBuilder::ForTypedArrayElement(type, true)));
+      node->ReplaceInput(0, elements);
+      node->ReplaceInput(2, jsgraph()->Uint32Constant(length));
+      node->ReplaceInput(3, value);
+      node->ReplaceInput(4, effect);
+      node->ReplaceInput(5, control);
+      node->TrimInputCount(6);
+      return Changed(node);
     }
-    Node* store =
-        graph()->NewNode(simplified()->StoreElement(element_access), elements,
-                         key, jsgraph()->Uint32Constant(length), value,
-                         NodeProperties::GetEffectInput(node),
-                         NodeProperties::GetControlInput(node));
-    return ReplaceEagerly(node, store);
   }
   return NoChange();
 }
diff --git a/src/compiler/mips/OWNERS b/src/compiler/mips/OWNERS
new file mode 100644
index 0000000..5508ba6
--- /dev/null
+++ b/src/compiler/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
new file mode 100644
index 0000000..19d1b02
--- /dev/null
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -0,0 +1,963 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/mips/macro-assembler-mips.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// TODO(plind): Possibly avoid using these lithium names.
+#define kScratchReg kLithiumScratchReg
+#define kCompareReg kLithiumScratchReg2
+#define kScratchDoubleReg kLithiumScratchDouble
+
+
+// TODO(plind): consider renaming these macros.
+#define TRACE_MSG(msg)                                                      \
+  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+         __LINE__)
+
+#define TRACE_UNIMPL()                                                       \
+  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
+         __LINE__)
+
+
+// Adds Mips-specific methods to convert InstructionOperands.
+class MipsOperandConverter FINAL : public InstructionOperandConverter {
+ public:
+  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  FloatRegister OutputSingleRegister(int index = 0) {
+    return ToSingleRegister(instr_->OutputAt(index));
+  }
+
+  FloatRegister InputSingleRegister(int index) {
+    return ToSingleRegister(instr_->InputAt(index));
+  }
+
+  FloatRegister ToSingleRegister(InstructionOperand* op) {
+    // Single (Float) and Double register namespace is same on MIPS,
+    // both are typedefs of FPURegister.
+    return ToDoubleRegister(op);
+  }
+
+  Operand InputImmediate(int index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kFloat32:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kInt64:
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
+        //    Maybe this is not done on ARM due to the constant pool?
+        break;
+    }
+    UNREACHABLE();
+    return Operand(zero_reg);
+  }
+
+  Operand InputOperand(int index) {
+    InstructionOperand* op = instr_->InputAt(index);
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op));
+    }
+    return InputImmediate(index);
+  }
+
+  MemOperand MemoryOperand(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        // TODO(plind): r6 address mode, to be implemented ...
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return MemOperand(no_reg);
+  }
+
+  MemOperand MemoryOperand() {
+    int index = 0;
+    return MemoryOperand(&index);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK(op != NULL);
+    DCHECK(!op->IsRegister());
+    DCHECK(!op->IsDoubleRegister());
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsRegister();
+}
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  MipsOperandConverter i(this, instr);
+  InstructionCode opcode = instr->opcode();
+
+  switch (ArchOpcodeField::decode(opcode)) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (instr->InputAt(0)->IsImmediate()) {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      } else {
+        __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+        __ Call(at);
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+      }
+
+      __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(at);
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      __ Branch(code_->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchStackPointer:
+      __ mov(i.OutputRegister(), sp);
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kMipsAdd:
+      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsAddOvf:
+      __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
+                                 i.InputOperand(1), kCompareReg, kScratchReg);
+      break;
+    case kMipsSub:
+      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsSubOvf:
+      __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
+                                 i.InputOperand(1), kCompareReg, kScratchReg);
+      break;
+    case kMipsMul:
+      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsDiv:
+      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsDivU:
+      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsMod:
+      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsModU:
+      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsAnd:
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsOr:
+      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsXor:
+      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsShl:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMipsShr:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMipsSar:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMipsRor:
+      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsTst:
+      // Pseudo-instruction used for tst/branch.
+      __ And(kCompareReg, i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMipsCmp:
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+      break;
+    case kMipsMov:
+      // TODO(plind): Should we combine mov/li like this, or use separate instr?
+      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+      if (HasRegisterInput(instr, 0)) {
+        __ mov(i.OutputRegister(), i.InputRegister(0));
+      } else {
+        __ li(i.OutputRegister(), i.InputOperand(0));
+      }
+      break;
+
+    case kMipsCmpD:
+      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
+      break;
+    case kMipsAddD:
+      // TODO(plind): add special case: combine mult & add.
+      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMipsSubD:
+      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMipsMulD:
+      // TODO(plind): add special case: right op is -1.0, see arm port.
+      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMipsDivD:
+      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMipsModD: {
+      // TODO(bmeurer): We should really get rid of this special instruction,
+      // and generate a CallAddress instruction instead.
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      __ PrepareCallCFunction(0, 2, kScratchReg);
+      __ MovToFloatParameters(i.InputDoubleRegister(0),
+                              i.InputDoubleRegister(1));
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result in the double result register.
+      __ MovFromFloatResult(i.OutputDoubleRegister());
+      break;
+    }
+    case kMipsSqrtD: {
+      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    }
+    case kMipsCvtSD: {
+      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+      break;
+    }
+    case kMipsCvtDS: {
+      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+      break;
+    }
+    case kMipsCvtDW: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ mtc1(i.InputRegister(0), scratch);
+      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
+      break;
+    }
+    case kMipsCvtDUw: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+      break;
+    }
+    case kMipsTruncWD: {
+      FPURegister scratch = kScratchDoubleReg;
+      // Other arches use round to zero here, so we follow.
+      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
+      __ mfc1(i.OutputRegister(), scratch);
+      break;
+    }
+    case kMipsTruncUwD: {
+      FPURegister scratch = kScratchDoubleReg;
+      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
+      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      break;
+    }
+    // ... more basic instructions ...
+
+    case kMipsLbu:
+      __ lbu(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsLb:
+      __ lb(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsSb:
+      __ sb(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMipsLhu:
+      __ lhu(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsLh:
+      __ lh(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsSh:
+      __ sh(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMipsLw:
+      __ lw(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMipsSw:
+      __ sw(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMipsLwc1: {
+      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
+      break;
+    }
+    case kMipsSwc1: {
+      int index = 0;
+      MemOperand operand = i.MemoryOperand(&index);
+      __ swc1(i.InputSingleRegister(index), operand);
+      break;
+    }
+    case kMipsLdc1:
+      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+      break;
+    case kMipsSdc1:
+      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+      break;
+    case kMipsPush:
+      __ Push(i.InputRegister(0));
+      break;
+    case kMipsStoreWriteBarrier:
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ addu(index, object, index);
+      __ sw(value, MemOperand(index));
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+      RAStatus ra_status = kRAHasNotBeenSaved;
+      __ RecordWrite(object, index, value, ra_status, mode);
+      break;
+  }
+}
+
+
+#define UNSUPPORTED_COND(opcode, condition)                                  \
+  OFStream out(stdout);                                                      \
+  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
+  UNIMPLEMENTED();
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  MipsOperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently than on other architectures. The compare
+  // operations emit MIPS pseudo-instructions, which are handled here by
+  // branch instructions that do the actual comparison. It is essential
+  // that the input registers to the compare pseudo-op are not modified
+  // before this branch op, as they are tested here.
+  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
+  //    not separated by other instructions.
+
+  if (instr->arch_opcode() == kMipsTst) {
+    // The kMipsTst pseudo-instruction emits And to the 'kCompareReg' register.
+    switch (condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsTst, condition);
+        break;
+    }
+    __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
+
+  } else if (instr->arch_opcode() == kMipsAddOvf ||
+             instr->arch_opcode() == kMipsSubOvf) {
+    // kMipsAddOvf/SubOvf emit a negative result to 'kCompareReg' on overflow.
+    switch (condition) {
+      case kOverflow:
+        cc = lt;
+        break;
+      case kNotOverflow:
+        cc = ge;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsAddOvf, condition);
+        break;
+    }
+    __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
+
+  } else if (instr->arch_opcode() == kMipsCmp) {
+    switch (condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsCmp, condition);
+        break;
+    }
+    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+    if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
+    __ bind(&done);
+
+  } else if (instr->arch_opcode() == kMipsCmpD) {
+    // TODO(dusmil): Optimize unordered checks to use fewer instructions,
+    // even if we have to unfold the BranchF macro.
+    Label* nan = flabel;
+    switch (condition) {
+      case kUnorderedEqual:
+        cc = eq;
+        break;
+      case kUnorderedNotEqual:
+        cc = ne;
+        nan = tlabel;
+        break;
+      case kUnorderedLessThan:
+        cc = lt;
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        cc = ge;
+        nan = tlabel;
+        break;
+      case kUnorderedLessThanOrEqual:
+        cc = le;
+        break;
+      case kUnorderedGreaterThan:
+        cc = gt;
+        nan = tlabel;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsCmpD, condition);
+        break;
+    }
+    __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+
+    if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
+    __ bind(&done);
+
+  } else {
+    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+           instr->arch_opcode());
+    UNIMPLEMENTED();
+  }
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  MipsOperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label false_value;
+  DCHECK_NE(0, instr->OutputCount());
+  Register result = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently than on other architectures. The compare
+  // operations emit MIPS pseudo-instructions, which are checked and
+  // handled here.
+
+  // For materializations, we use the branch delay slot to set the result
+  // to true; in the false case, where we fall through the branch, we set
+  // the result to false.
+
+  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
+  //    not separated by other instructions.
+  if (instr->arch_opcode() == kMipsTst) {
+    // The kMipsTst pseudo-instruction emits And to the 'kCompareReg' register.
+    switch (condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsTst, condition);
+        break;
+    }
+    __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
+    __ li(result, Operand(1));  // In delay slot.
+
+  } else if (instr->arch_opcode() == kMipsAddOvf ||
+             instr->arch_opcode() == kMipsSubOvf) {
+    // kMipsAddOvf/SubOvf emit a negative result to 'kCompareReg' on overflow.
+    switch (condition) {
+      case kOverflow:
+        cc = lt;
+        break;
+      case kNotOverflow:
+        cc = ge;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsAddOvf, condition);
+        break;
+    }
+    __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
+    __ li(result, Operand(1));  // In delay slot.
+
+
+  } else if (instr->arch_opcode() == kMipsCmp) {
+    Register left = i.InputRegister(0);
+    Operand right = i.InputOperand(1);
+    switch (condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsCmp, condition);
+        break;
+    }
+    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot.
+
+  } else if (instr->arch_opcode() == kMipsCmpD) {
+    FPURegister left = i.InputDoubleRegister(0);
+    FPURegister right = i.InputDoubleRegister(1);
+    // TODO(plind): Provide NaN-testing macro-asm function without need for
+    // BranchF.
+    FPURegister dummy1 = f0;
+    FPURegister dummy2 = f2;
+    switch (condition) {
+      case kUnorderedEqual:
+        // TODO(plind): Improve the NaN testing throughout this function.
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = eq;
+        break;
+      case kUnorderedNotEqual:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = ne;
+        break;
+      case kUnorderedLessThan:
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = lt;
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = ge;
+        break;
+      case kUnorderedLessThanOrEqual:
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = le;
+        break;
+      case kUnorderedGreaterThan:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = gt;
+        break;
+      default:
+        UNSUPPORTED_COND(kMipsCmpD, condition);
+        break;
+    }
+    __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot - branch taken returns 1.
+                                // Fall-thru (branch not taken) returns 0.
+
+  } else {
+    PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+           instr->arch_opcode());
+    TRACE_UNIMPL();
+    UNIMPLEMENTED();
+  }
+  // Fallthru case is the false materialization.
+  __ bind(&false_value);
+  __ li(result, Operand(0));
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ Push(ra, fp);
+    __ mov(fp, sp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      // TODO(plind): make callee save size const, possibly DCHECK it.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+      __ MultiPush(saves);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ lw(a2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      __ Branch(&ok, ne, a2, Operand(at));
+
+      __ lw(a2, GlobalObjectOperand());
+      __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+      __ sw(a2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ bind(&ok);
+    }
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      if (saves != 0) {
+        __ MultiPop(saves);
+      }
+    }
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+    __ Ret();
+  } else {
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ DropAndRet(pop_count);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  MipsOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(g.ToRegister(destination), src);
+    } else {
+      __ sw(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ lw(g.ToRegister(destination), src);
+    } else {
+      Register temp = kScratchReg;
+      __ lw(temp, src);
+      __ sw(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    Constant src = g.ToConstant(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ li(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kFloat32:
+          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+          break;
+        case Constant::kInt64:
+          UNREACHABLE();
+          break;
+        case Constant::kFloat64:
+          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ li(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject:
+          __ li(dst, src.ToHeapObject());
+          break;
+      }
+      if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
+    } else if (src.type() == Constant::kFloat32) {
+      FPURegister dst = destination->IsDoubleRegister()
+                            ? g.ToDoubleRegister(destination)
+                            : kScratchDoubleReg.low();
+      // TODO(turbofan): Can we do better here?
+      __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+      __ mtc1(at, dst);
+      if (destination->IsDoubleStackSlot()) {
+        __ swc1(dst, g.ToMemOperand(destination));
+      }
+    } else {
+      DCHECK_EQ(Constant::kFloat64, src.type());
+      DoubleRegister dst = destination->IsDoubleRegister()
+                               ? g.ToDoubleRegister(destination)
+                               : kScratchDoubleReg;
+      __ Move(dst, src.ToFloat64());
+      if (destination->IsDoubleStackSlot()) {
+        __ sdc1(dst, g.ToMemOperand(destination));
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    FPURegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPURegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ sdc1(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(g.ToDoubleRegister(destination), src);
+    } else {
+      FPURegister temp = kScratchDoubleReg;
+      __ ldc1(temp, src);
+      __ sdc1(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  MipsOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ mov(temp, src);
+      __ lw(src, dst);
+      __ sw(temp, dst);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+    Register temp_0 = kScratchReg;
+    Register temp_1 = kCompareReg;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ lw(temp_0, src);
+    __ lw(temp_1, dst);
+    __ sw(temp_0, dst);
+    __ sw(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    FPURegister temp = kScratchDoubleReg;
+    FPURegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPURegister dst = g.ToDoubleRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ Move(temp, src);
+      __ ldc1(src, dst);
+      __ sdc1(temp, dst);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    Register temp_0 = kScratchReg;
+    FPURegister temp_1 = kScratchDoubleReg;
+    MemOperand src0 = g.ToMemOperand(source);
+    MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+    MemOperand dst0 = g.ToMemOperand(destination);
+    MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
+    __ lw(temp_0, src0);    // Then use temp_0 to copy source to destination.
+    __ sw(temp_0, dst0);
+    __ lw(temp_0, src1);
+    __ sw(temp_0, dst1);
+    __ sdc1(temp_1, src0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
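
In the double-stack-slot case above, one FPU temp parks the destination while a GPR temp copies the source word by word; the parked value then lands in the source slot. A plain C++ model of the same sequence (sketch; assumes the two 32-bit slots of a double are adjacent, as in the MemOperands built above):

    #include <cstdint>
    #include <cstring>

    // src and dst each name two consecutive 32-bit stack slots holding a double.
    void SwapDoubleSlots(uint32_t* src, uint32_t* dst) {
      uint64_t temp_1;                           // stands in for kScratchDoubleReg
      std::memcpy(&temp_1, dst, sizeof temp_1);  // ldc1 temp_1, dst0
      dst[0] = src[0];                           // lw/sw through temp_0
      dst[1] = src[1];
      std::memcpy(src, &temp_1, sizeof temp_1);  // sdc1 temp_1, src0
    }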
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // Unused on 32-bit ARM. Still exists on 64-bit ARM.
+  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
+  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block trampoline pool emission for duration of padding.
+      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+          masm());
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= v8::internal::Assembler::kInstrSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
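
The nop padding above is straightforward arithmetic; with hypothetical sizes (a 16-byte patch_size(), 4-byte kInstrSize), a quick model:

    #include <cassert>

    // Bytes of nop padding needed so patching the lazy deopt cannot clobber
    // the previous lazy-bailout site (mirrors the computation above).
    int PaddingBytes(int last_lazy_deopt_pc, int space_needed, int current_pc) {
      int padding = last_lazy_deopt_pc + space_needed - current_pc;
      return padding > 0 ? padding : 0;
    }

    int main() {
      assert(PaddingBytes(40, 16, 48) == 8);  // two 4-byte nops emitted
      assert(PaddingBytes(40, 16, 60) == 0);  // already far enough away
      return 0;
    }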
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
new file mode 100644
index 0000000..cd05afc
--- /dev/null
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+#define V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// MIPS-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(MipsAdd)                       \
+  V(MipsAddOvf)                    \
+  V(MipsSub)                       \
+  V(MipsSubOvf)                    \
+  V(MipsMul)                       \
+  V(MipsDiv)                       \
+  V(MipsDivU)                      \
+  V(MipsMod)                       \
+  V(MipsModU)                      \
+  V(MipsAnd)                       \
+  V(MipsOr)                        \
+  V(MipsXor)                       \
+  V(MipsShl)                       \
+  V(MipsShr)                       \
+  V(MipsSar)                       \
+  V(MipsRor)                       \
+  V(MipsMov)                       \
+  V(MipsTst)                       \
+  V(MipsCmp)                       \
+  V(MipsCmpD)                      \
+  V(MipsAddD)                      \
+  V(MipsSubD)                      \
+  V(MipsMulD)                      \
+  V(MipsDivD)                      \
+  V(MipsModD)                      \
+  V(MipsSqrtD)                     \
+  V(MipsCvtSD)                     \
+  V(MipsCvtDS)                     \
+  V(MipsTruncWD)                   \
+  V(MipsTruncUwD)                  \
+  V(MipsCvtDW)                     \
+  V(MipsCvtDUw)                    \
+  V(MipsLb)                        \
+  V(MipsLbu)                       \
+  V(MipsSb)                        \
+  V(MipsLh)                        \
+  V(MipsLhu)                       \
+  V(MipsSh)                        \
+  V(MipsLw)                        \
+  V(MipsSw)                        \
+  V(MipsLwc1)                      \
+  V(MipsSwc1)                      \
+  V(MipsLdc1)                      \
+  V(MipsSdc1)                      \
+  V(MipsPush)                      \
+  V(MipsStoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+// TODO(plind): Add the new r6 address modes.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MIPS_INSTRUCTION_CODES_MIPS_H_
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
new file mode 100644
index 0000000..4edfd59
--- /dev/null
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -0,0 +1,544 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+
+// Adds Mips-specific methods for generating InstructionOperands.
+class MipsOperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit MipsOperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+    if (CanBeImmediate(node, opcode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, InstructionCode opcode) {
+    Int32Matcher m(node);
+    if (!m.HasValue()) return false;
+    int32_t value = m.Value();
+    switch (ArchOpcodeField::decode(opcode)) {
+      case kMipsShl:
+      case kMipsSar:
+      case kMipsShr:
+        return is_uint5(value);
+      case kMipsXor:
+        return is_uint16(value);
+      case kMipsLdc1:
+      case kMipsSdc1:
+        return is_int16(value + kIntSize);
+      default:
+        return is_int16(value);
+    }
+  }
+
+ private:
+  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+    TRACE_UNIMPL();
+    return false;
+  }
+};
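
CanBeImmediate above captures the MIPS immediate encodings: 5-bit unsigned shift amounts, a zero-extended 16-bit immediate for xori, and sign-extended 16-bit memory offsets, where ldc1/sdc1 also require value + kIntSize to fit, apparently because the 64-bit access may be split into two word accesses 4 bytes apart. A standalone restatement (sketch, not V8 code):

    #include <cassert>
    #include <cstdint>

    static bool is_uint5(int32_t v)  { return v >= 0 && v < (1 << 5); }
    static bool is_uint16(int32_t v) { return v >= 0 && v < (1 << 16); }
    static bool is_int16(int32_t v)  { return v >= -(1 << 15) && v < (1 << 15); }

    enum Op { kShiftOp, kXorOp, kDoubleMemOp, kOtherOp };

    bool CanBeImmediate(Op op, int32_t value) {
      switch (op) {
        case kShiftOp:     return is_uint5(value);      // sll/srl/sra amounts
        case kXorOp:       return is_uint16(value);     // xori zero-extends
        case kDoubleMemOp: return is_int16(value + 4);  // second word at +4
        default:           return is_int16(value);      // usual I-type immediate
      }
    }

    int main() {
      assert(CanBeImmediate(kShiftOp, 31) && !CanBeImmediate(kShiftOp, 32));
      // 32764 fits a plain int16 offset, but 32764 + 4 does not.
      assert(!CanBeImmediate(kDoubleMemOp, 32764) && CanBeImmediate(kOtherOp, 32764));
      return 0;
    }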
+
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  MipsOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  MipsOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), opcode));
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  MipsOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  inputs[input_count++] = g.UseRegister(m.left().node());
+  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  MipsOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kMipsLwc1;
+      break;
+    case kRepFloat64:
+      opcode = kMipsLdc1;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeUint32 ? kMipsLbu : kMipsLb;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeUint32 ? kMipsLhu : kMipsLh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kMipsLw;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    InstructionOperand* addr_reg = g.TempRegister();
+    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired load opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  MipsOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
+    Emit(kMipsStoreWriteBarrier, NULL, g.UseFixed(base, t0),
+         g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kMipsSwc1;
+      break;
+    case kRepFloat64:
+      opcode = kMipsSdc1;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kMipsSb;
+      break;
+    case kRepWord16:
+      opcode = kMipsSh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kMipsSw;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    InstructionOperand* addr_reg = g.TempRegister();
+    Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired store opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
+         g.TempImmediate(0), g.UseRegister(value));
+  }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kMipsAnd);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kMipsOr);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  VisitBinop(this, node, kMipsXor);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitRRO(this, kMipsShl, node);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitRRO(this, kMipsShr, node);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitRRO(this, kMipsSar, node);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitRRO(this, kMipsRor, node);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  MipsOperandGenerator g(this);
+
+  // TODO(plind): Consider multiply & add optimization from arm port.
+  VisitBinop(this, node, kMipsAdd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  VisitBinop(this, node, kMipsSub);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int32_t value = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(value)) {
+      Emit(kMipsShl | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value)));
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value - 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      Emit(kMipsAdd | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value + 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      Emit(kMipsSub | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+      return;
+    }
+  }
+  Emit(kMipsMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
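
VisitInt32Mul above strength-reduces multiplication by constants at or adjacent to powers of two into shift/add/sub sequences. A standalone model with spot checks (sketch; small positive operands to avoid signed-overflow pedantry):

    #include <cassert>
    #include <cstdint>

    static bool IsPowerOfTwo32(uint32_t v) { return v != 0 && (v & (v - 1)) == 0; }

    static int WhichPowerOf2(uint32_t v) {  // log2 of a power of two
      int k = 0;
      while (v > 1) { v >>= 1; ++k; }
      return k;
    }

    int32_t MulByConstant(int32_t x, int32_t m) {
      if (m <= 0) return x * m;  // the selector also requires m > 0
      if (IsPowerOfTwo32(m)) return x << WhichPowerOf2(m);                // m = 2^k
      if (IsPowerOfTwo32(m - 1)) return (x << WhichPowerOf2(m - 1)) + x;  // m = 2^k + 1
      if (IsPowerOfTwo32(m + 1)) return (x << WhichPowerOf2(m + 1)) - x;  // m = 2^k - 1
      return x * m;  // general case: kMipsMul
    }

    int main() {
      assert(MulByConstant(7, 8) == 56);  // single shift
      assert(MulByConstant(7, 9) == 63);  // shift + add
      assert(MulByConstant(7, 7) == 49);  // shift + sub
      return 0;
    }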
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMipsDiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMipsDivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+  MipsOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCvtDUw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsTruncWD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsTruncUwD, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsCvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  VisitRRR(this, kMipsAddD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  VisitRRR(this, kMipsSubD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  VisitRRR(this, kMipsMulD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRR(this, kMipsDivD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12),
+       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  MipsOperandGenerator g(this);
+  Emit(kMipsSqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  MipsOperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(call, &buffer, true, false);
+
+  // TODO(dcarney): might be possible to use claim/poke instead
+  // Push any stack arguments.
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    // TODO(plind): inefficient for MIPS, use MultiPush here.
+    //    - Also need to align the stack. See arm64.
+    //    - Maybe combine with arg slot stuff in DirectCEntry stub.
+    Emit(kMipsPush, NULL, g.UseRegister(*input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kMipsAddOvf, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kMipsSubOvf, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  MipsOperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    // TODO(plind): Revisit and test this path.
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  MipsOperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, opcode)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, opcode)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kWord32And:
+      // TODO(plind): understand the significance of 'IR and' special case.
+      return VisitWordCompare(this, node, kMipsTst, cont, true);
+    default:
+      break;
+  }
+
+  MipsOperandGenerator g(this);
+  // kMipsTst is a pseudo-instruction to do logical 'and' and leave the result
+  // in a dedicated tmp register.
+  VisitCompare(this, kMipsTst, g.UseRegister(node), g.UseRegister(node), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kMipsCmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  MipsOperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kMipsCmpD, g.UseRegister(left), g.UseRegister(right),
+               cont);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/mips/linkage-mips.cc b/src/compiler/mips/linkage-mips.cc
new file mode 100644
index 0000000..b9f3fa2
--- /dev/null
+++ b/src/compiler/mips/linkage-mips.cc
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct MipsLinkageHelperTraits {
+  static Register ReturnValueReg() { return v0; }
+  static Register ReturnValue2Reg() { return v1; }
+  static Register JSCallFunctionReg() { return a1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return a1; }
+  static Register RuntimeCallArgCountReg() { return a0; }
+  static RegList CCalleeSaveRegisters() {
+    return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
+           s6.bit() | s7.bit();
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {a0, a1, a2, a3};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 4; }
+};
+
+
+typedef LinkageHelper<MipsLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index bb2e76b..d486eb8 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -27,6 +27,32 @@
 }
 
 
+bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
+  return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
+         lhs.type == rhs.type && lhs.machine_type == rhs.machine_type;
+}
+
+
+bool operator!=(FieldAccess const& lhs, FieldAccess const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
+  os << "[" << access.base_is_tagged << ", " << access.offset << ", ";
+#ifdef OBJECT_PRINT
+  Handle<Name> name;
+  if (access.name.ToHandle(&name)) {
+    name->Print(os);
+    os << ", ";
+  }
+#endif
+  access.type->PrintTo(os);
+  os << ", " << access.machine_type << "]";
+  return os;
+}
+
+
 std::ostream& operator<<(std::ostream& os, BoundsCheckMode bounds_check_mode) {
   switch (bounds_check_mode) {
     case kNoBoundsCheck:
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index b05cb20..92e8a72 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -38,13 +38,18 @@
 struct FieldAccess {
   BaseTaggedness base_is_tagged;  // specifies if the base pointer is tagged.
   int offset;                     // offset of the field, without tag.
-  Handle<Name> name;              // debugging only.
+  MaybeHandle<Name> name;         // debugging only.
   Type* type;                     // type of the field.
   MachineType machine_type;       // machine type of the field.
 
   int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
 };
 
+bool operator==(FieldAccess const& lhs, FieldAccess const& rhs);
+bool operator!=(FieldAccess const& lhs, FieldAccess const& rhs);
+
+std::ostream& operator<<(std::ostream&, FieldAccess const&);
+
 
 enum BoundsCheckMode { kNoBoundsCheck, kTypedArrayBoundsCheck };
 
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 37aca10..6f9d68f 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -184,13 +184,21 @@
   } while (0)
 
 
-#define ASSEMBLE_SHIFT(asm_instr, width)                                 \
-  do {                                                                   \
-    if (HasImmediateInput(instr, 1)) {                                   \
-      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
-    } else {                                                             \
-      __ asm_instr##_cl(i.OutputRegister());                             \
-    }                                                                    \
+#define ASSEMBLE_SHIFT(asm_instr, width)                                   \
+  do {                                                                     \
+    if (HasImmediateInput(instr, 1)) {                                     \
+      if (instr->Output()->IsRegister()) {                                 \
+        __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
+      } else {                                                             \
+        __ asm_instr(i.OutputOperand(), Immediate(i.InputInt##width(1)));  \
+      }                                                                    \
+    } else {                                                               \
+      if (instr->Output()->IsRegister()) {                                 \
+        __ asm_instr##_cl(i.OutputRegister());                             \
+      } else {                                                             \
+        __ asm_instr##_cl(i.OutputOperand());                              \
+      }                                                                    \
+    }                                                                      \
   } while (0)
 
 
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 8f52606..f744a2c 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -21,8 +21,6 @@
                                            Register::ToAllocationIndex(reg));
   }
 
-  InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }
-
   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
@@ -32,23 +30,6 @@
     }
   }
 
-  bool CanBeImmediate64(Node* node) {
-    switch (node->opcode()) {
-      case IrOpcode::kInt32Constant:
-        return true;
-      case IrOpcode::kNumberConstant:
-        return true;
-      case IrOpcode::kHeapConstant: {
-        // Constants in new space cannot be used as immediates in V8 because
-        // the GC does not scan code objects when collecting the new generation.
-        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
-        return !isolate()->heap()->InNewSpace(*value.handle());
-      }
-      default:
-        return false;
-    }
-  }
-
   bool CanBeBetterLeftOperand(Node* node) const {
     return !selector()->IsLive(node);
   }
@@ -350,9 +331,8 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // TODO(turbofan): assembler only supports some addressing modes for shifts.
   if (g.CanBeImmediate(right)) {
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
                    g.UseImmediate(right));
   } else {
     Int32BinopMatcher m(node);
@@ -362,7 +342,7 @@
         right = mright.left().node();
       }
     }
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
                    g.UseFixed(right, rcx));
   }
 }
@@ -376,9 +356,8 @@
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // TODO(turbofan): assembler only supports some addressing modes for shifts.
   if (g.CanBeImmediate(right)) {
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
                    g.UseImmediate(right));
   } else {
     Int64BinopMatcher m(node);
@@ -388,7 +367,7 @@
         right = mright.left().node();
       }
     }
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
                    g.UseFixed(right, rcx));
   }
 }
diff --git a/src/factory.cc b/src/factory.cc
index 0adc873..25d8cd8 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -1716,8 +1716,22 @@
 }
 
 
-static JSFunction* GetTypedArrayFun(ExternalArrayType type,
-                                    Isolate* isolate) {
+namespace {
+
+ElementsKind GetExternalArrayElementsKind(ExternalArrayType type) {
+  switch (type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                          \
+    return EXTERNAL_##TYPE##_ELEMENTS;
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+  }
+  UNREACHABLE();
+  return FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND;
+#undef TYPED_ARRAY_CASE
+}
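
For reference, TYPED_ARRAYS(TYPED_ARRAY_CASE) above expands to one case per typed-array type; for Uint8 the generated case reads `case kExternalUint8Array: return EXTERNAL_UINT8_ELEMENTS;`.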
+
+
+JSFunction* GetTypedArrayFun(ExternalArrayType type, Isolate* isolate) {
   Context* native_context = isolate->context()->native_context();
   switch (type) {
 #define TYPED_ARRAY_FUN(Type, type, TYPE, ctype, size)                        \
@@ -1733,6 +1747,8 @@
   }
 }
 
+}  // namespace
+
 
 Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type) {
   Handle<JSFunction> typed_array_fun_handle(GetTypedArrayFun(type, isolate()));
@@ -1744,6 +1760,28 @@
 }
 
 
+Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
+                                              Handle<JSArrayBuffer> buffer,
+                                              size_t length) {
+  DCHECK(length <= static_cast<size_t>(kMaxInt));
+  Handle<JSTypedArray> array = NewJSTypedArray(type);
+  array->set_buffer(*buffer);
+  array->set_weak_next(buffer->weak_first_view());
+  buffer->set_weak_first_view(*array);
+  array->set_byte_offset(Smi::FromInt(0));
+  array->set_byte_length(buffer->byte_length());
+  Handle<Object> length_handle = NewNumberFromSize(length);
+  array->set_length(*length_handle);
+  Handle<ExternalArray> elements =
+      NewExternalArray(static_cast<int>(length), type, buffer->backing_store());
+  JSObject::SetMapAndElements(array,
+                              JSObject::GetElementsTransitionMap(
+                                  array, GetExternalArrayElementsKind(type)),
+                              elements);
+  return array;
+}
+
+
 Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
                                     Handle<Object> prototype) {
   // Allocate map.
diff --git a/src/factory.h b/src/factory.h
index 981dee0..8df53af 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -434,6 +434,11 @@
 
   Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
 
+  // Creates a new JSTypedArray with the specified buffer.
+  Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
+                                       Handle<JSArrayBuffer> buffer,
+                                       size_t length);
+
   Handle<JSDataView> NewJSDataView();
 
   // Allocates a Harmony proxy.
diff --git a/src/feedback-slots.h b/src/feedback-slots.h
deleted file mode 100644
index 9951fc8..0000000
--- a/src/feedback-slots.h
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_FEEDBACK_SLOTS_H_
-#define V8_FEEDBACK_SLOTS_H_
-
-#include "src/v8.h"
-
-#include "src/isolate.h"
-
-namespace v8 {
-namespace internal {
-
-class FeedbackSlotInterface {
- public:
-  static const int kInvalidFeedbackSlot = -1;
-
-  virtual ~FeedbackSlotInterface() {}
-
-  virtual int ComputeFeedbackSlotCount() = 0;
-  virtual void SetFirstFeedbackSlot(int slot) = 0;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_FEEDBACK_SLOTS_H_
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 4cc4f25..bfa2dca 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -531,10 +531,6 @@
             "trace progress of the incremental marking")
 DEFINE_BOOL(track_gc_object_stats, false,
             "track object counts and memory usage")
-DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping")
-DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")
-DEFINE_INT(sweeper_threads, 0,
-           "number of parallel and concurrent sweeping threads")
 #ifdef VERIFY_HEAP
 DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
 #endif
@@ -663,8 +659,6 @@
 DEFINE_BOOL(predictable, false, "enable predictable mode")
 DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
 DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
-DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
-DEFINE_NEG_IMPLICATION(predictable, parallel_sweeping)
 
 
 //
diff --git a/src/globals.h b/src/globals.h
index dc7568f..bef22cf 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -26,7 +26,7 @@
 #endif
 
 #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM || \
-    V8_TARGET_ARCH_ARM64
+    V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
 #define V8_TURBOFAN_BACKEND 1
 #else
 #define V8_TURBOFAN_BACKEND 0
@@ -148,10 +148,10 @@
 const bool kRequiresCodeRange = true;
 const size_t kMaximalCodeRangeSize = 512 * MB;
 #if V8_OS_WIN
-const size_t kMinimumCodeRangeSize = 2 * MB;
+const size_t kMinimumCodeRangeSize = 4 * MB;
 const size_t kReservedCodeRangePages = 1;
 #else
-const size_t kMinimumCodeRangeSize = 1 * MB;
+const size_t kMinimumCodeRangeSize = 3 * MB;
 const size_t kReservedCodeRangePages = 0;
 #endif
 #else
@@ -162,7 +162,7 @@
 // x32 port also requires code range.
 const bool kRequiresCodeRange = true;
 const size_t kMaximalCodeRangeSize = 256 * MB;
-const size_t kMinimumCodeRangeSize = 1 * MB;
+const size_t kMinimumCodeRangeSize = 3 * MB;
 const size_t kReservedCodeRangePages = 0;
 #else
 const bool kRequiresCodeRange = false;
diff --git a/src/handles.h b/src/handles.h
index 577e83a..eb57f0e 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -53,7 +53,8 @@
   }
 
   // Convert to a Handle with a type that can be upcasted to.
-  template <class S> INLINE(bool ToHandle(Handle<S>* out)) {
+  template <class S>
+  V8_INLINE bool ToHandle(Handle<S>* out) const {
     if (location_ == NULL) {
       *out = Handle<T>::null();
       return false;
diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc
index b9a99b2..3e36449 100644
--- a/src/heap/gc-idle-time-handler.cc
+++ b/src/heap/gc-idle-time-handler.cc
@@ -71,30 +71,48 @@
 }
 
 
-size_t GCIdleTimeHandler::EstimateScavengeTime(
-    size_t new_space_size, size_t scavenge_speed_in_bytes_per_ms) {
+bool GCIdleTimeHandler::DoScavenge(
+    size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
+    size_t scavenge_speed_in_bytes_per_ms,
+    size_t new_space_allocation_throughput_in_bytes_per_ms) {
+  size_t new_space_allocation_limit =
+      kMaxFrameRenderingIdleTime * scavenge_speed_in_bytes_per_ms;
+
+  // If the limit is larger than the new space size, then scavenging used to be
+  // really fast. We can take advantage of the whole new space.
+  if (new_space_allocation_limit > new_space_size) {
+    new_space_allocation_limit = new_space_size;
+  }
+
+  // We do not know the allocation throughput before the first Scavenge.
+  // TODO(hpayer): Estimate allocation throughput before the first Scavenge.
+  if (new_space_allocation_throughput_in_bytes_per_ms == 0) {
+    new_space_allocation_limit =
+        static_cast<size_t>(new_space_size * kConservativeTimeRatio);
+  } else {
+    // We have to trigger scavenge before we reach the end of new space.
+    new_space_allocation_limit -=
+        new_space_allocation_throughput_in_bytes_per_ms *
+        kMaxFrameRenderingIdleTime;
+  }
+
   if (scavenge_speed_in_bytes_per_ms == 0) {
     scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed;
   }
-  return new_space_size / scavenge_speed_in_bytes_per_ms;
-}
 
-
-bool GCIdleTimeHandler::ScavangeMayHappenSoon(
-    size_t available_new_space_memory,
-    size_t new_space_allocation_throughput_in_bytes_per_ms) {
-  if (available_new_space_memory <=
-      new_space_allocation_throughput_in_bytes_per_ms *
-          kMaxFrameRenderingIdleTime) {
-    return true;
+  if (new_space_allocation_limit <= used_new_space_size) {
+    if (used_new_space_size / scavenge_speed_in_bytes_per_ms <=
+        idle_time_in_ms) {
+      return true;
+    }
   }
   return false;
 }
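
A worked example of the heuristic above, as a standalone restatement with made-up numbers (sketch; kConservativeTimeRatio's value is assumed here, and the 16 ms budget matches kMaxFrameRenderingIdleTime):

    #include <cassert>
    #include <cstddef>

    const size_t KB = 1024;
    const size_t MB = 1024 * KB;
    const size_t kMaxFrameRenderingIdleTime = 16;  // ms
    const double kConservativeTimeRatio = 0.5;     // assumed value
    const size_t kInitialConservativeScavengeSpeed = 100 * KB;

    bool DoScavenge(size_t idle_ms, size_t new_space, size_t used,
                    size_t scavenge_speed, size_t alloc_throughput) {
      size_t limit = kMaxFrameRenderingIdleTime * scavenge_speed;
      if (limit > new_space) limit = new_space;
      if (alloc_throughput == 0) {
        limit = static_cast<size_t>(new_space * kConservativeTimeRatio);
      } else {
        limit -= alloc_throughput * kMaxFrameRenderingIdleTime;
      }
      if (scavenge_speed == 0) scavenge_speed = kInitialConservativeScavengeSpeed;
      return limit <= used && used / scavenge_speed <= idle_ms;
    }

    int main() {
      // 8 MB new space, 7 MB used, 1 MB/ms scavenge speed, 100 KB/ms allocation
      // rate: limit = min(16 MB, 8 MB) - 1.5625 MB ~= 6.4 MB <= 7 MB used, and
      // the scavenge itself takes 7 MB / (1 MB/ms) = 7 ms <= 10 ms of idle time.
      assert(DoScavenge(10, 8 * MB, 7 * MB, 1 * MB, 100 * KB));
      return 0;
    }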
 
 
 // The following logic is implemented by the controller:
-// (1) If the new space is almost full and we can effort a Scavenge, then a
-// Scavenge is performed.
+// (1) If the new space is almost full and we can afford a Scavenge or if the
+// next Scavenge will very likely take long, then a Scavenge is performed.
 // (2) If there is currently no MarkCompact idle round going on, we start a
 // new idle round if enough garbage was created or we received a context
 // disposal event. Otherwise we do not perform garbage collection to keep
@@ -110,15 +128,13 @@
 // that this currently may trigger a full garbage collection.
 GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
                                             HeapState heap_state) {
-  if (idle_time_in_ms <= kMaxFrameRenderingIdleTime &&
-      ScavangeMayHappenSoon(
-          heap_state.available_new_space_memory,
-          heap_state.new_space_allocation_throughput_in_bytes_per_ms) &&
-      idle_time_in_ms >=
-          EstimateScavengeTime(heap_state.new_space_capacity,
-                               heap_state.scavenge_speed_in_bytes_per_ms)) {
+  if (DoScavenge(idle_time_in_ms, heap_state.new_space_capacity,
+                 heap_state.used_new_space_size,
+                 heap_state.scavenge_speed_in_bytes_per_ms,
+                 heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
     return GCIdleTimeAction::Scavenge();
   }
+
   if (IsMarkCompactIdleRoundFinished()) {
     if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) {
       StartIdleRound();
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
index daab616..15aaea6 100644
--- a/src/heap/gc-idle-time-handler.h
+++ b/src/heap/gc-idle-time-handler.h
@@ -113,10 +113,6 @@
   // That is the maximum idle time we will have during frame rendering.
   static const size_t kMaxFrameRenderingIdleTime = 16;
 
-  // If less than that much memory is left in the new space, we consider it
-  // as almost full and force a new space collection earlier in the idle time.
-  static const size_t kNewSpaceAlmostFullTreshold = 100 * KB;
-
   // If we haven't recorded any scavenger events yet, we use a conservative
   // lower bound for the scavenger speed.
   static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
@@ -130,7 +126,7 @@
     size_t mark_compact_speed_in_bytes_per_ms;
     size_t incremental_marking_speed_in_bytes_per_ms;
     size_t scavenge_speed_in_bytes_per_ms;
-    size_t available_new_space_memory;
+    size_t used_new_space_size;
     size_t new_space_capacity;
     size_t new_space_allocation_throughput_in_bytes_per_ms;
   };
@@ -159,11 +155,9 @@
   static size_t EstimateMarkCompactTime(
       size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
 
-  static size_t EstimateScavengeTime(size_t new_space_size,
-                                     size_t scavenger_speed_in_bytes_per_ms);
-
-  static bool ScavangeMayHappenSoon(
-      size_t available_new_space_memory,
+  static bool DoScavenge(
+      size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
+      size_t scavenger_speed_in_bytes_per_ms,
       size_t new_space_allocation_throughput_in_bytes_per_ms);
 
  private:
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index f76a0cf..0ccd0ab 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -2522,6 +2522,13 @@
       roots_[entry.index] = map;
     }
 
+    {  // Create a separate external one-byte string map for native sources.
+      AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
+                                                ExternalOneByteString::kSize);
+      if (!allocation.To(&obj)) return false;
+      set_native_source_string_map(Map::cast(obj));
+    }
+
     ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
     undetectable_string_map()->set_is_undetectable();
 
@@ -4314,7 +4321,7 @@
       tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
   heap_state.scavenge_speed_in_bytes_per_ms =
       static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
-  heap_state.available_new_space_memory = new_space_.Available();
+  heap_state.used_new_space_size = new_space_.Size();
   heap_state.new_space_capacity = new_space_.Capacity();
   heap_state.new_space_allocation_throughput_in_bytes_per_ms =
       static_cast<size_t>(
diff --git a/src/heap/heap.h b/src/heap/heap.h
index b281cb4..c37464c 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -83,6 +83,7 @@
   V(Map, external_string_with_one_byte_data_map,                               \
     ExternalStringWithOneByteDataMap)                                          \
   V(Map, external_one_byte_string_map, ExternalOneByteStringMap)               \
+  V(Map, native_source_string_map, NativeSourceStringMap)                      \
   V(Map, short_external_string_map, ShortExternalStringMap)                    \
   V(Map, short_external_string_with_one_byte_data_map,                         \
     ShortExternalStringWithOneByteDataMap)                                     \
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index ae99d98..69fc579 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -4167,7 +4167,6 @@
 
     switch (sweeper) {
       case CONCURRENT_SWEEPING:
-      case PARALLEL_SWEEPING:
         if (!parallel_sweeping_active) {
           if (FLAG_gc_verbose) {
             PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
@@ -4218,19 +4217,6 @@
 }
 
 
-static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
-  return (type == MarkCompactCollector::PARALLEL_SWEEPING ||
-          type == MarkCompactCollector::CONCURRENT_SWEEPING) &&
-         !FLAG_predictable;
-}
-
-
-static bool ShouldWaitForSweeperThreads(
-    MarkCompactCollector::SweeperType type) {
-  return type == MarkCompactCollector::PARALLEL_SWEEPING;
-}
-
-
 void MarkCompactCollector::SweepSpaces() {
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
   double start_time = 0.0;
@@ -4241,10 +4227,6 @@
 #ifdef DEBUG
   state_ = SWEEP_SPACES;
 #endif
-  SweeperType how_to_sweep = CONCURRENT_SWEEPING;
-  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING;
-  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING;
-
   MoveEvacuationCandidatesToEndOfPagesList();
 
   // Noncompacting collections simply sweep the spaces to clear the mark
@@ -4257,17 +4239,13 @@
                                 GCTracer::Scope::MC_SWEEP_OLDSPACE);
     {
       SequentialSweepingScope scope(this);
-      SweepSpace(heap()->old_pointer_space(), how_to_sweep);
-      SweepSpace(heap()->old_data_space(), how_to_sweep);
+      SweepSpace(heap()->old_pointer_space(), CONCURRENT_SWEEPING);
+      SweepSpace(heap()->old_data_space(), CONCURRENT_SWEEPING);
     }
 
-    if (ShouldStartSweeperThreads(how_to_sweep)) {
+    if (!FLAG_predictable) {
       StartSweeperThreads();
     }
-
-    if (ShouldWaitForSweeperThreads(how_to_sweep)) {
-      EnsureSweepingCompleted();
-    }
   }
   RemoveDeadInvalidatedCode();
 
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index 6afb8d6..5e615b3 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -547,7 +547,6 @@
   void EnableCodeFlushing(bool enable);
 
   enum SweeperType {
-    PARALLEL_SWEEPING,
     CONCURRENT_SWEEPING,
     SEQUENTIAL_SWEEPING
   };
diff --git a/src/hydrogen-bch.cc b/src/hydrogen-bch.cc
index 5af6030..2feb158 100644
--- a/src/hydrogen-bch.cc
+++ b/src/hydrogen-bch.cc
@@ -237,14 +237,13 @@
     // constant limit we will use that instead of the induction limit.
     bool has_upper_constant_limit = true;
     int32_t upper_constant_limit =
-        check != NULL && check->HasUpperLimit() ? check->upper_limit() : 0;
+        check->HasUpperLimit() ? check->upper_limit() : 0;
     for (InductionVariableData::InductionVariableCheck* current_check = check;
          current_check != NULL;
          current_check = current_check->next()) {
       has_upper_constant_limit =
-          has_upper_constant_limit &&
-          check->HasUpperLimit() &&
-          check->upper_limit() == upper_constant_limit;
+          has_upper_constant_limit && current_check->HasUpperLimit() &&
+          current_check->upper_limit() == upper_constant_limit;
       counters()->bounds_checks_eliminated()->Increment();
       current_check->check()->set_skip_check();
     }
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index eb6d4f0..1544bad 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -2211,7 +2211,7 @@
  */
 int32_t InductionVariableData::ComputeIncrement(HPhi* phi,
                                                 HValue* phi_operand) {
-  if (!phi_operand->representation().IsInteger32()) return 0;
+  if (!phi_operand->representation().IsSmiOrInteger32()) return 0;
 
   if (phi_operand->IsAdd()) {
     HAdd* operation = HAdd::cast(phi_operand);
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 3c0042d..aec7644 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -15,7 +15,6 @@
 #include "src/conversions.h"
 #include "src/data-flow.h"
 #include "src/deoptimizer.h"
-#include "src/feedback-slots.h"
 #include "src/hydrogen-types.h"
 #include "src/small-pointer-list.h"
 #include "src/unique.h"
@@ -5475,8 +5474,7 @@
   Handle<String> name() const { return name_; }
   bool for_typeof() const { return for_typeof_; }
   int slot() const {
-    DCHECK(FLAG_vector_ics &&
-           slot_ != FeedbackSlotInterface::kInvalidFeedbackSlot);
+    DCHECK(FLAG_vector_ics && slot_ != AstNode::kInvalidFeedbackSlot);
     return slot_;
   }
   Handle<FixedArray> feedback_vector() const { return feedback_vector_; }
@@ -5497,8 +5495,9 @@
  private:
   HLoadGlobalGeneric(HValue* context, HValue* global_object,
                      Handle<String> name, bool for_typeof)
-      : name_(name), for_typeof_(for_typeof),
-        slot_(FeedbackSlotInterface::kInvalidFeedbackSlot) {
+      : name_(name),
+        for_typeof_(for_typeof),
+        slot_(AstNode::kInvalidFeedbackSlot) {
     SetOperandAt(0, context);
     SetOperandAt(1, global_object);
     set_representation(Representation::Tagged());
@@ -6478,8 +6477,7 @@
   Handle<Object> name() const { return name_; }
 
   int slot() const {
-    DCHECK(FLAG_vector_ics &&
-           slot_ != FeedbackSlotInterface::kInvalidFeedbackSlot);
+    DCHECK(FLAG_vector_ics && slot_ != AstNode::kInvalidFeedbackSlot);
     return slot_;
   }
   Handle<FixedArray> feedback_vector() const { return feedback_vector_; }
@@ -6499,8 +6497,7 @@
 
  private:
   HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
-      : name_(name),
-        slot_(FeedbackSlotInterface::kInvalidFeedbackSlot) {
+      : name_(name), slot_(AstNode::kInvalidFeedbackSlot) {
     SetOperandAt(0, context);
     SetOperandAt(1, object);
     set_representation(Representation::Tagged());
@@ -6756,8 +6753,7 @@
   HValue* key() const { return OperandAt(1); }
   HValue* context() const { return OperandAt(2); }
   int slot() const {
-    DCHECK(FLAG_vector_ics &&
-           slot_ != FeedbackSlotInterface::kInvalidFeedbackSlot);
+    DCHECK(FLAG_vector_ics && slot_ != AstNode::kInvalidFeedbackSlot);
     return slot_;
   }
   Handle<FixedArray> feedback_vector() const { return feedback_vector_; }
@@ -6780,7 +6776,7 @@
 
  private:
   HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key)
-      : slot_(FeedbackSlotInterface::kInvalidFeedbackSlot) {
+      : slot_(AstNode::kInvalidFeedbackSlot) {
     set_representation(Representation::Tagged());
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index e1e3af2..54adfea 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -4,15 +4,12 @@
 
 #include "src/hydrogen.h"
 
-#include <algorithm>
 #include <sstream>
 
 #include "src/v8.h"
 
 #include "src/allocation-site-scopes.h"
-#include "src/codegen.h"
 #include "src/full-codegen.h"
-#include "src/hashmap.h"
 #include "src/hydrogen-bce.h"
 #include "src/hydrogen-bch.h"
 #include "src/hydrogen-canonicalize.h"
@@ -43,7 +40,6 @@
 #include "src/parser.h"
 #include "src/runtime/runtime.h"
 #include "src/scopeinfo.h"
-#include "src/scopes.h"
 #include "src/typing.h"
 
 #if V8_TARGET_ARCH_IA32
@@ -2903,10 +2899,6 @@
                                 length, NULL);
     }
 
-    if (capacity == NULL) {
-      capacity = AddLoadFixedArrayLength(to_elements);
-    }
-
     LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
 
     HValue* key = builder.BeginBody(length, graph()->GetConstant0(),
@@ -6313,7 +6305,7 @@
   HControlInstruction* smi_check = NULL;
   handled_string = false;
 
-  for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
+  for (i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
     PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
     if (info.type()->Is(Type::String())) {
       if (handled_string) continue;
@@ -6391,7 +6383,7 @@
   // know about and do not want to handle ones we've never seen.  Otherwise
   // use a generic IC.
   if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
-    FinishExitWithHardDeoptimization("Uknown map in polymorphic access");
+    FinishExitWithHardDeoptimization("Unknown map in polymorphic access");
   } else {
     HInstruction* instr = BuildNamedGeneric(access_type, expr, object, name,
                                             value);
@@ -9111,7 +9103,6 @@
                         LookupIterator::OWN_SKIP_INTERCEPTOR);
       GlobalPropertyAccess type = LookupGlobalProperty(var, &it, LOAD);
       if (type == kUseCell) {
-        Handle<GlobalObject> global(current_info()->global_object());
         known_global_function = expr->ComputeGlobalTarget(global, &it);
       }
       if (known_global_function) {
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 5c34b68..29885b3 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -315,7 +315,6 @@
   HEnvironment* start_environment() const { return start_environment_; }
 
   void FinalizeUniqueness();
-  bool ProcessArgumentsObject();
   void OrderBlocks();
   void AssignDominators();
   void RestoreActualValues();
@@ -479,8 +478,6 @@
     phase.Run();
   }
 
-  void EliminateRedundantBoundsChecksUsingInductionVariables();
-
   Isolate* isolate_;
   int next_block_id_;
   HBasicBlock* entry_block_;
@@ -2204,7 +2201,6 @@
   void VisitLogicalExpression(BinaryOperation* expr);
   void VisitArithmeticExpression(BinaryOperation* expr);
 
-  bool PreProcessOsrEntry(IterationStatement* statement);
   void VisitLoopBody(IterationStatement* stmt,
                      HBasicBlock* loop_entry);
 
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index fd6a8d6..5e8333d 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -982,24 +982,24 @@
 }
 
 
-void Assembler::ror(Register dst, uint8_t imm8) {
+void Assembler::ror(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
   DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xC8 | dst.code());
+    emit_operand(ecx, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xC8 | dst.code());
+    emit_operand(ecx, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::ror_cl(Register dst) {
+void Assembler::ror_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xC8 | dst.code());
+  emit_operand(ecx, dst);
 }
 
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index cb17655..4532a67 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -740,8 +740,11 @@
 
   void rcl(Register dst, uint8_t imm8);
   void rcr(Register dst, uint8_t imm8);
-  void ror(Register dst, uint8_t imm8);
-  void ror_cl(Register dst);
+
+  void ror(Register dst, uint8_t imm8) { ror(Operand(dst), imm8); }
+  void ror(const Operand& dst, uint8_t imm8);
+  void ror_cl(Register dst) { ror_cl(Operand(dst)); }
+  void ror_cl(const Operand& dst);
 
   void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); }
   void sar(const Operand& dst, uint8_t imm8);
diff --git a/src/lithium.cc b/src/lithium.cc
index 7d992a1..d57a2dd 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -463,8 +463,8 @@
   LOG_CODE_EVENT(info()->isolate(),
                  CodeStartLinePosInfoRecordEvent(
                      assembler.positions_recorder()));
-  // TODO(yangguo) remove this once the code serializer handles code stubs.
-  if (info()->will_serialize()) assembler.enable_serializer();
+  // Code serializer only takes unoptimized code.
+  DCHECK(!info()->will_serialize());
   LCodeGen generator(this, &assembler, info());
 
   MarkEmptyBlocks();
diff --git a/src/parser.cc b/src/parser.cc
index ed04d24..ec7605b 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -3381,7 +3381,7 @@
 
   // Too many parentheses around expression:
   //   (( ... )) => ...
-  if (expression->parenthesization_level() > 1) return false;
+  if (expression->is_multi_parenthesized()) return false;
 
   // Case for a single parameter:
   //   (foo) => ...
diff --git a/src/serialize.cc b/src/serialize.cc
index 9d59b6f..0cc629d 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -834,6 +834,7 @@
   }
 }
 
+
 void Deserializer::ReadChunk(Object** current,
                              Object** limit,
                              int source_space,
@@ -1514,10 +1515,8 @@
 }
 
 
-void Serializer::ObjectSerializer::Serialize() {
-  int space = Serializer::SpaceOfObject(object_);
-  int size = object_->Size();
-
+void Serializer::ObjectSerializer::SerializePrologue(int space, int size,
+                                                     Map* map) {
   sink_->Put(kNewObject + reference_representation_ + space,
              "ObjectSerialization");
   sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
@@ -1546,12 +1545,85 @@
   }
 
   // Serialize the map (first word of the object).
-  serializer_->SerializeObject(object_->map(), kPlain, kStartOfObject, 0);
+  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
+}
+
+
+void Serializer::ObjectSerializer::SerializeExternalString() {
+  // Instead of serializing this as an external string, we serialize
+  // an imaginary sequential string with the same content.
+  Isolate* isolate = serializer_->isolate();
+  DCHECK(object_->IsExternalString());
+  DCHECK(object_->map() != isolate->heap()->native_source_string_map());
+  ExternalString* string = ExternalString::cast(object_);
+  int length = string->length();
+  Map* map;
+  int size;
+  const char* resource;
+  // Find the map and size for the imaginary sequential string.
+  if (object_->IsExternalOneByteString()) {
+    map = isolate->heap()->one_byte_internalized_string_map();
+    size = SeqOneByteString::SizeFor(length);
+    resource = ExternalOneByteString::cast(string)->resource()->data();
+  } else {
+    map = isolate->heap()->internalized_string_map();
+    size = SeqTwoByteString::SizeFor(length);
+    resource = reinterpret_cast<const char*>(
+        ExternalTwoByteString::cast(string)->resource()->data());
+  }
+
+  int space =
+      (size > Page::kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_DATA_SPACE;
+  SerializePrologue(space, size, map);
+
+  // Output the rest of the imaginary string.
+  int bytes_to_output = size - HeapObject::kHeaderSize;
+
+  // Output raw data header. Do not bother with common raw length cases here.
+  sink_->Put(kRawData, "RawDataForString");
+  sink_->PutInt(bytes_to_output, "length");
+
+  // Serialize string header (except for map).
+  Address string_start = string->address();
+  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
+    sink_->PutSection(string_start[i], "StringHeader");
+  }
+
+  // Serialize string content.
+  int content_length = size - SeqString::kHeaderSize;
+  for (int i = 0; i < content_length; i++) {
+    sink_->PutSection(resource[i], "StringContent");
+  }
+
+  sink_->Put(kSkip, "SkipAfterString");
+  sink_->PutInt(bytes_to_output, "SkipDistance");
+}
+
+
+void Serializer::ObjectSerializer::Serialize() {
+  if (object_->IsExternalString()) {
+    Heap* heap = serializer_->isolate()->heap();
+    if (object_->map() != heap->native_source_string_map()) {
+      // Usually we cannot recreate resources for external strings. To work
+      // around this, external strings are serialized to look like ordinary
+      // sequential strings.
+      // The exception is native source code strings, since we can recreate
+      // their resources. In that case we fall through and leave it to
+      // VisitExternalOneByteString further down.
+      SerializeExternalString();
+      return;
+    }
+  }
+
+  int size = object_->Size();
+  Map* map = object_->map();
+  SerializePrologue(Serializer::SpaceOfObject(object_), size, map);
 
   // Serialize the rest of the object.
   CHECK_EQ(0, bytes_processed_so_far_);
   bytes_processed_so_far_ = kPointerSize;
-  object_->IterateBody(object_->map()->instance_type(), size, this);
+
+  object_->IterateBody(map->instance_type(), size, this);
   OutputRawData(object_->address() + size);
 }
 
@@ -1697,7 +1769,7 @@
     }
   }
   // One of the strings in the natives cache should match the resource.  We
-  // can't serialize any other kinds of external strings.
+  // don't expect any other kinds of external strings here.
   UNREACHABLE();
 }
 
@@ -1876,6 +1948,11 @@
   }
 
   if (address_mapper_.IsMapped(heap_object)) {
+    if (FLAG_trace_code_serializer) {
+      PrintF("Encoding back reference to: ");
+      heap_object->ShortPrint();
+      PrintF("\n");
+    }
     SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
                                        skip);
     return;
@@ -1985,6 +2062,7 @@
          (how_to_code == kFromCode && where_to_point == kInnerPointer));
   uint32_t stub_key = stub->stub_key();
   DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
+  DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
 
   int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
 
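The new SerializeExternalString path above replays an external string as an
"imaginary" sequential one: pick the internalized sequential-string map for
the character width, compute the size a sequential string of that length
would occupy, choose the target space by that size, then emit the header
bytes followed by the characters pulled from the external resource. A
back-of-the-envelope sketch of the size and space choice (the constants are
illustrative stand-ins for SeqString::kHeaderSize, object alignment, and
Page::kMaxRegularHeapObjectSize, not V8's actual values):

    #include <cstddef>

    const size_t kHeaderSize = 16;            // illustrative string header
    const size_t kAlignment = sizeof(void*);  // object pointer alignment
    const size_t kMaxRegularObjectSize = 16 * 1024;  // illustrative limit

    // Size a sequential string of 'length' characters would occupy.
    size_t SequentialSizeFor(size_t length, size_t char_width) {
      size_t unaligned = kHeaderSize + length * char_width;
      return (unaligned + kAlignment - 1) & ~(kAlignment - 1);
    }

    enum Space { OLD_DATA, LARGE_OBJECT };

    // Large strings go to large-object space, everything else to old data.
    Space SpaceFor(size_t length, bool one_byte) {
      size_t size = SequentialSizeFor(length, one_byte ? 1 : 2);
      return size > kMaxRegularObjectSize ? LARGE_OBJECT : OLD_DATA;
    }
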
diff --git a/src/serialize.h b/src/serialize.h
index 616f8f1..6fa7a34 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -439,12 +439,16 @@
     }
 
    private:
+    void SerializePrologue(int space, int size, Map* map);
+
     enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
     // This function outputs or skips the raw data between the last pointer and
     // up to the current position.  It optionally can just return the number of
     // bytes to skip instead of performing a skip instruction, in case the skip
     // can be merged into the next instruction.
     int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
+    // External strings are serialized so that they resemble sequential strings.
+    void SerializeExternalString();
 
     Serializer* serializer_;
     HeapObject* object_;
diff --git a/src/type-info.cc b/src/type-info.cc
index cf3950f..5b9a71d 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -9,11 +9,8 @@
 #include "src/compiler.h"
 #include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
-#include "src/macro-assembler.h"
 #include "src/type-info.h"
 
-#include "src/objects-inl.h"
-
 namespace v8 {
 namespace internal {
 
@@ -81,17 +78,6 @@
 }
 
 
-bool TypeFeedbackOracle::StoreIsKeyedPolymorphic(TypeFeedbackId ast_id) {
-  Handle<Object> maybe_code = GetInfo(ast_id);
-  if (maybe_code->IsCode()) {
-    Handle<Code> code = Handle<Code>::cast(maybe_code);
-    return code->is_keyed_store_stub() &&
-        code->ic_state() == POLYMORPHIC;
-  }
-  return false;
-}
-
-
 bool TypeFeedbackOracle::CallIsMonomorphic(int slot) {
   Handle<Object> value = GetInfo(slot);
   return value->IsAllocationSite() || value->IsJSFunction();
diff --git a/src/type-info.h b/src/type-info.h
index 434ddd6..1343e0a 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -25,7 +25,6 @@
 
   bool LoadIsUninitialized(TypeFeedbackId id);
   bool StoreIsUninitialized(TypeFeedbackId id);
-  bool StoreIsKeyedPolymorphic(TypeFeedbackId id);
   bool CallIsMonomorphic(int slot);
   bool CallIsMonomorphic(TypeFeedbackId aid);
   bool KeyedArrayCallIsHoley(TypeFeedbackId id);
diff --git a/src/utils.h b/src/utils.h
index 2991815..ad8c020 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -61,7 +61,6 @@
   }
   DCHECK_EQ(1 << bits, original_x);
   return bits;
-  return 0;
 }
 
 
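The utils.h fix simply drops a second return that could never execute. For
context, the surrounding function computes the exponent of a power of two; a
standalone equivalent looks roughly like this (a naive loop for illustration,
not V8's actual implementation):

    #include <cassert>

    // Sketch: find n such that x == 1u << n, assuming x is a power of two.
    int WhichPowerOf2(unsigned x) {
      assert(x != 0 && (x & (x - 1)) == 0);  // must be a power of two
      int bits = 0;
      while (x > 1) {
        x >>= 1;
        ++bits;
      }
      return bits;  // anything after this return is dead code
    }
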
diff --git a/src/version.cc b/src/version.cc
index 1d00e42..85757fc 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     30
-#define BUILD_NUMBER      2
+#define BUILD_NUMBER      3
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index a9fb834..01103b2 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -617,6 +617,24 @@
 }
 
 
+void Assembler::shift(Operand dst, Immediate shift_amount, int subcode,
+                      int size) {
+  EnsureSpace ensure_space(this);
+  DCHECK(size == kInt64Size ? is_uint6(shift_amount.value_)
+                            : is_uint5(shift_amount.value_));
+  if (shift_amount.value_ == 1) {
+    emit_rex(dst, size);
+    emit(0xD1);
+    emit_operand(subcode, dst);
+  } else {
+    emit_rex(dst, size);
+    emit(0xC1);
+    emit_operand(subcode, dst);
+    emit(shift_amount.value_);
+  }
+}
+
+
 void Assembler::shift(Register dst, int subcode, int size) {
   EnsureSpace ensure_space(this);
   emit_rex(dst, size);
@@ -625,6 +643,14 @@
 }
 
 
+void Assembler::shift(Operand dst, int subcode, int size) {
+  EnsureSpace ensure_space(this);
+  emit_rex(dst, size);
+  emit(0xD3);
+  emit_operand(subcode, dst);
+}
+
+
 void Assembler::bt(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   emit_rex_64(src, dst);
@@ -948,11 +974,13 @@
   emit_rex(dst, src, size);
   if (is_int8(imm.value_)) {
     emit(0x6B);
+    emit_operand(dst, src);
+    emit(imm.value_);
   } else {
     emit(0x69);
+    emit_operand(dst, src);
+    emitl(imm.value_);
   }
-  emit_operand(dst, src);
-  emit(imm.value_);
 }
 
 
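Two changes land in assembler-x64.cc: the shift emitters gain Operand
overloads (the same 0xD1/0xC1/0xD3 opcode group as the ia32 change, with the
sub-operation carried in the ModRM reg field), and the imul-by-immediate
emitter is fixed to emit a 32-bit immediate for opcode 0x69 -- previously a
single immediate byte was emitted for both the 0x6B (imm8) and 0x69 (imm32)
forms. The shift group's opcode selection, as a compact sketch (not V8 code):

    #include <cstdint>

    // Sketch: opcode selection for the x86-64 shift/rotate group. 'subcode'
    // is the /digit (0=rol, 1=ror, 2=rcl, 3=rcr, 4=shl, 5=shr, 7=sar) that
    // lands in the ModRM reg field via the emit_operand-style helpers.
    uint8_t ShiftOpcode(bool by_cl, uint8_t imm8) {
      if (by_cl) return 0xD3;      // shift count taken from cl
      if (imm8 == 1) return 0xD1;  // shift by 1 has a shorter encoding
      return 0xC1;                 // shift by imm8; immediate byte follows
    }
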
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 529b100..b461124 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -813,30 +813,42 @@
   // Multiply rax by src, put the result in rdx:rax.
   void mul(Register src);
 
-#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode)     \
-  void instruction##p(Register dst, Immediate imm8) {       \
-    shift(dst, imm8, subcode, kPointerSize);                \
-  }                                                         \
-                                                            \
-  void instruction##l(Register dst, Immediate imm8) {       \
-    shift(dst, imm8, subcode, kInt32Size);                  \
-  }                                                         \
-                                                            \
-  void instruction##q(Register dst, Immediate imm8) {       \
-    shift(dst, imm8, subcode, kInt64Size);                  \
-  }                                                         \
-                                                            \
-  void instruction##p_cl(Register dst) {                    \
-    shift(dst, subcode, kPointerSize);                      \
-  }                                                         \
-                                                            \
-  void instruction##l_cl(Register dst) {                    \
-    shift(dst, subcode, kInt32Size);                        \
-  }                                                         \
-                                                            \
-  void instruction##q_cl(Register dst) {                    \
-    shift(dst, subcode, kInt64Size);                        \
-  }
+#define DECLARE_SHIFT_INSTRUCTION(instruction, subcode)                       \
+  void instruction##p(Register dst, Immediate imm8) {                         \
+    shift(dst, imm8, subcode, kPointerSize);                                  \
+  }                                                                           \
+                                                                              \
+  void instruction##l(Register dst, Immediate imm8) {                         \
+    shift(dst, imm8, subcode, kInt32Size);                                    \
+  }                                                                           \
+                                                                              \
+  void instruction##q(Register dst, Immediate imm8) {                         \
+    shift(dst, imm8, subcode, kInt64Size);                                    \
+  }                                                                           \
+                                                                              \
+  void instruction##p(Operand dst, Immediate imm8) {                          \
+    shift(dst, imm8, subcode, kPointerSize);                                  \
+  }                                                                           \
+                                                                              \
+  void instruction##l(Operand dst, Immediate imm8) {                          \
+    shift(dst, imm8, subcode, kInt32Size);                                    \
+  }                                                                           \
+                                                                              \
+  void instruction##q(Operand dst, Immediate imm8) {                          \
+    shift(dst, imm8, subcode, kInt64Size);                                    \
+  }                                                                           \
+                                                                              \
+  void instruction##p_cl(Register dst) { shift(dst, subcode, kPointerSize); } \
+                                                                              \
+  void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); }   \
+                                                                              \
+  void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); }   \
+                                                                              \
+  void instruction##p_cl(Operand dst) { shift(dst, subcode, kPointerSize); }  \
+                                                                              \
+  void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); }    \
+                                                                              \
+  void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
   SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
 #undef DECLARE_SHIFT_INSTRUCTION
 
@@ -1365,9 +1377,11 @@
                                int size);
 
   // Emit machine code for a shift operation.
+  void shift(Operand dst, Immediate shift_amount, int subcode, int size);
   void shift(Register dst, Immediate shift_amount, int subcode, int size);
   // Shift dst by cl % 64 bits.
   void shift(Register dst, int subcode, int size);
+  void shift(Operand dst, int subcode, int size);
 
   void emit_farith(int b1, int b2, int i);
 
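The widened DECLARE_SHIFT_INSTRUCTION macro now stamps out twelve overloads
per mnemonic: pointer/32-bit/64-bit immediate and _cl forms for both Register
and Operand destinations. Assuming SHIFT_INSTRUCTION_LIST contains shl with
subcode 4 (the /4 extension, consistent with the disassembler table in the
next file), the expansion for that single entry is roughly:

    // Sketch of the macro expansion for one list entry (shl, subcode 4).
    // Every body forwards to the shared shift() emitters declared above.
    void shlp(Register dst, Immediate imm8) { shift(dst, imm8, 4, kPointerSize); }
    void shll(Register dst, Immediate imm8) { shift(dst, imm8, 4, kInt32Size); }
    void shlq(Register dst, Immediate imm8) { shift(dst, imm8, 4, kInt64Size); }
    void shlp(Operand dst, Immediate imm8) { shift(dst, imm8, 4, kPointerSize); }
    void shll(Operand dst, Immediate imm8) { shift(dst, imm8, 4, kInt32Size); }
    void shlq(Operand dst, Immediate imm8) { shift(dst, imm8, 4, kInt64Size); }
    void shlp_cl(Register dst) { shift(dst, 4, kPointerSize); }
    void shll_cl(Register dst) { shift(dst, 4, kInt32Size); }
    void shlq_cl(Register dst) { shift(dst, 4, kInt64Size); }
    void shlp_cl(Operand dst) { shift(dst, 4, kPointerSize); }
    void shll_cl(Operand dst) { shift(dst, 4, kInt32Size); }
    void shlq_cl(Operand dst) { shift(dst, 4, kInt64Size); }
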
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 2b8fc2d..dcbba9f 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -709,65 +709,62 @@
 
 int DisassemblerX64::ShiftInstruction(byte* data) {
   byte op = *data & (~1);
+  int count = 1;
   if (op != 0xD0 && op != 0xD2 && op != 0xC0) {
     UnimplementedInstruction();
-    return 1;
+    return count;
   }
-  byte modrm = *(data + 1);
-  int mod, regop, rm;
-  get_modrm(modrm, &mod, &regop, &rm);
-  regop &= 0x7;  // The REX.R bit does not affect the operation.
-  int imm8 = -1;
-  int num_bytes = 2;
-  if (mod != 3) {
-    UnimplementedInstruction();
-    return num_bytes;
+  // Print mnemonic.
+  {
+    byte modrm = *(data + count);
+    int mod, regop, rm;
+    get_modrm(modrm, &mod, &regop, &rm);
+    regop &= 0x7;  // The REX.R bit does not affect the operation.
+    const char* mnem = NULL;
+    switch (regop) {
+      case 0:
+        mnem = "rol";
+        break;
+      case 1:
+        mnem = "ror";
+        break;
+      case 2:
+        mnem = "rcl";
+        break;
+      case 3:
+        mnem = "rcr";
+        break;
+      case 4:
+        mnem = "shl";
+        break;
+      case 5:
+        mnem = "shr";
+        break;
+      case 7:
+        mnem = "sar";
+        break;
+      default:
+        UnimplementedInstruction();
+        return count + 1;
+    }
+    DCHECK_NE(NULL, mnem);
+    AppendToBuffer("%s%c ", mnem, operand_size_code());
   }
-  const char* mnem = NULL;
-  switch (regop) {
-    case 0:
-      mnem = "rol";
-      break;
-    case 1:
-      mnem = "ror";
-      break;
-    case 2:
-      mnem = "rcl";
-      break;
-    case 3:
-      mnem = "rcr";
-      break;
-    case 4:
-      mnem = "shl";
-      break;
-    case 5:
-      mnem = "shr";
-      break;
-    case 7:
-      mnem = "sar";
-      break;
-    default:
-      UnimplementedInstruction();
-      return num_bytes;
-  }
-  DCHECK_NE(NULL, mnem);
-  if (op == 0xD0) {
-    imm8 = 1;
-  } else if (op == 0xC0) {
-    imm8 = *(data + 2);
-    num_bytes = 3;
-  }
-  AppendToBuffer("%s%c %s,",
-                 mnem,
-                 operand_size_code(),
-                 byte_size_operand_ ? NameOfByteCPURegister(rm)
-                                    : NameOfCPURegister(rm));
+  count += PrintRightOperand(data + count);
   if (op == 0xD2) {
-    AppendToBuffer("cl");
+    AppendToBuffer(", cl");
   } else {
-    AppendToBuffer("%d", imm8);
+    int imm8 = -1;
+    if (op == 0xD0) {
+      imm8 = 1;
+    } else {
+      DCHECK_EQ(0xC0, op);
+      imm8 = *(data + count);
+      count++;
+    }
+    AppendToBuffer(", %d", imm8);
   }
-  return num_bytes;
+  return count;
 }
 
 
@@ -1489,15 +1486,15 @@
 
       case 0x69:  // fall through
       case 0x6B: {
-        int mod, regop, rm;
-        get_modrm(*(data + 1), &mod, &regop, &rm);
-        int32_t imm = *data == 0x6B ? *(data + 2)
-            : *reinterpret_cast<int32_t*>(data + 2);
-        AppendToBuffer("imul%c %s,%s,0x%x",
-                       operand_size_code(),
-                       NameOfCPURegister(regop),
-                       NameOfCPURegister(rm), imm);
-        data += 2 + (*data == 0x6B ? 1 : 4);
+        int count = 1;
+        count += PrintOperands("imul", REG_OPER_OP_ORDER, data + count);
+        AppendToBuffer(",0x");
+        if (*data == 0x69) {
+          count += PrintImmediate(data + count, operand_size());
+        } else {
+          count += PrintImmediate(data + count, OPERAND_BYTE_SIZE);
+        }
+        data += count;
         break;
       }
 
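The ShiftInstruction rewrite above stops assuming a register-direct ModRM
byte (the old code bailed out when mod != 3) and instead consumes bytes
incrementally: a running count starts past the opcode, PrintRightOperand
advances it over whatever ModRM/SIB/displacement form is present, and the
immediate, if any, is read at the resulting offset. The same count-based
shape, reduced to a runnable sketch that handles only the register form:

    #include <cstdint>
    #include <cstdio>

    // Minimal operand decoder: handles only register-direct ModRM (mod==3),
    // which occupies one byte. Real decoders also consume SIB/displacement.
    int DecodeOperandSketch(const uint8_t* p) { return (*p >> 6) == 3 ? 1 : 0; }

    // Sketch of the count-accumulation pattern; returns bytes consumed.
    int DecodeShiftSketch(const uint8_t* data) {
      uint8_t op = *data & ~1;  // fold away the byte/word bit
      int count = 1;            // opcode byte consumed
      count += DecodeOperandSketch(data + count);
      if (op == 0xC0) {         // shift by imm8: one more byte
        printf(", %d", data[count]);
        count++;
      } else if (op == 0xD2) {  // shift by cl: no immediate byte
        printf(", cl");
      }                         // op == 0xD0 means an implicit count of 1
      return count;
    }
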
diff --git a/test/cctest/compiler/call-tester.h b/test/cctest/compiler/call-tester.h
index e864160..60b1d25 100644
--- a/test/cctest/compiler/call-tester.h
+++ b/test/cctest/compiler/call-tester.h
@@ -207,7 +207,7 @@
         Simulator::CallArgument::End()};
     return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
   }
-#elif USE_SIMULATOR && V8_TARGET_ARCH_ARM
+#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS)
   uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
                           int32_t p3 = 0, int32_t p4 = 0) {
     Simulator* simulator = Simulator::current(isolate_);
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index b4af849..fc0281a 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -2824,6 +2824,16 @@
 }
 
 
+THREADED_TEST(GetIsolate) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+  Local<v8::Object> obj = v8::Object::New(isolate);
+  CHECK_EQ(isolate, obj->GetIsolate());
+  CHECK_EQ(isolate, CcTest::global()->GetIsolate());
+}
+
+
 THREADED_TEST(IdentityHash) {
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
@@ -22948,6 +22958,7 @@
   Handle<v8::Promise::Resolver> rr = v8::Promise::Resolver::New(isolate);
   Handle<v8::Promise> p = pr->GetPromise();
   Handle<v8::Promise> r = rr->GetPromise();
+  CHECK_EQ(isolate, p->GetIsolate());
 
   // IsPromise predicate.
   CHECK(p->IsPromise());
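
The new assertions pin down the contract of v8::Object::GetIsolate(): an
object (including a promise) reports the isolate it was created in. For
embedders this removes the need to thread an Isolate* alongside every object
handle; a hedged usage sketch (embedder-side helper, not V8 code):

    #include "include/v8.h"

    // Sketch: recover the isolate from an object instead of passing it
    // around. Assumes 'obj' came from some earlier V8 API call.
    v8::Local<v8::String> NameKey(v8::Local<v8::Object> obj) {
      v8::Isolate* isolate = obj->GetIsolate();  // new in this release
      return v8::String::NewFromUtf8(isolate, "name");
    }
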
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index 49088f6..8615d86 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -201,6 +201,12 @@
   __ rcl(edx, 7);
   __ rcr(edx, 1);
   __ rcr(edx, 7);
+  __ ror(edx, 1);
+  __ ror(edx, 6);
+  __ ror_cl(edx);
+  __ ror(Operand(ebx, ecx, times_4, 10000), 1);
+  __ ror(Operand(ebx, ecx, times_4, 10000), 6);
+  __ ror_cl(Operand(ebx, ecx, times_4, 10000));
   __ sar(edx, 1);
   __ sar(edx, 6);
   __ sar_cl(edx);
diff --git a/test/cctest/test-disasm-x64.cc b/test/cctest/test-disasm-x64.cc
index 480c91f..0a4882e 100644
--- a/test/cctest/test-disasm-x64.cc
+++ b/test/cctest/test-disasm-x64.cc
@@ -117,6 +117,26 @@
   __ imulq(rdx, rcx);
   __ shld(rdx, rcx);
   __ shrd(rdx, rcx);
+  __ shlq(Operand(rdi, rax, times_4, 100), Immediate(1));
+  __ shlq(Operand(rdi, rax, times_4, 100), Immediate(6));
+  __ shlq(Operand(r15, 0), Immediate(1));
+  __ shlq(Operand(r15, 0), Immediate(6));
+  __ shlq_cl(Operand(r15, 0));
+  __ shlq_cl(Operand(r15, 0));
+  __ shlq_cl(Operand(rdi, rax, times_4, 100));
+  __ shlq_cl(Operand(rdi, rax, times_4, 100));
+  __ shlq(rdx, Immediate(1));
+  __ shlq(rdx, Immediate(6));
+  __ shll(Operand(rdi, rax, times_4, 100), Immediate(1));
+  __ shll(Operand(rdi, rax, times_4, 100), Immediate(6));
+  __ shll(Operand(r15, 0), Immediate(1));
+  __ shll(Operand(r15, 0), Immediate(6));
+  __ shll_cl(Operand(r15, 0));
+  __ shll_cl(Operand(r15, 0));
+  __ shll_cl(Operand(rdi, rax, times_4, 100));
+  __ shll_cl(Operand(rdi, rax, times_4, 100));
+  __ shll(rdx, Immediate(1));
+  __ shll(rdx, Immediate(6));
   __ bts(Operand(rdx, 0), rcx);
   __ bts(Operand(rbx, rcx, times_4, 0), rcx);
   __ nop();
@@ -164,10 +184,16 @@
   __ notq(rdx);
   __ testq(Operand(rbx, rcx, times_4, 10000), rdx);
 
-  __ imulq(rdx, Operand(rbx, rcx, times_4, 10000));
   __ imulq(rdx, rcx, Immediate(12));
   __ imulq(rdx, rcx, Immediate(1000));
+  __ imulq(rdx, Operand(rbx, rcx, times_4, 10000));
+  __ imulq(rdx, Operand(rbx, rcx, times_4, 10000), Immediate(12));
   __ imulq(rdx, Operand(rbx, rcx, times_4, 10000), Immediate(1000));
+  __ imull(r15, rcx, Immediate(12));
+  __ imull(r15, rcx, Immediate(1000));
+  __ imull(r15, Operand(rbx, rcx, times_4, 10000));
+  __ imull(r15, Operand(rbx, rcx, times_4, 10000), Immediate(12));
+  __ imull(r15, Operand(rbx, rcx, times_4, 10000), Immediate(1000));
 
   __ incq(rdx);
   __ incq(Operand(rbx, rcx, times_4, 10000));
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index ed9419d..02fac5a 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -901,6 +901,216 @@
 }
 
 
+class SerializerOneByteResource
+    : public v8::String::ExternalOneByteStringResource {
+ public:
+  SerializerOneByteResource(const char* data, size_t length)
+      : data_(data), length_(length) {}
+  virtual const char* data() const { return data_; }
+  virtual size_t length() const { return length_; }
+
+ private:
+  const char* data_;
+  size_t length_;
+};
+
+
+class SerializerTwoByteResource : public v8::String::ExternalStringResource {
+ public:
+  SerializerTwoByteResource(const char* data, size_t length)
+      : data_(AsciiToTwoByteString(data)), length_(length) {}
+  ~SerializerTwoByteResource() { DeleteArray<const uint16_t>(data_); }
+
+  virtual const uint16_t* data() const { return data_; }
+  virtual size_t length() const { return length_; }
+
+ private:
+  const uint16_t* data_;
+  size_t length_;
+};
+
+
+TEST(SerializeToplevelExternalString) {
+  FLAG_serialize_toplevel = true;
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  // Obtain external internalized one-byte string.
+  SerializerOneByteResource one_byte_resource("one_byte", 8);
+  Handle<String> one_byte_string =
+      isolate->factory()->NewStringFromAsciiChecked("one_byte");
+  one_byte_string = isolate->factory()->InternalizeString(one_byte_string);
+  one_byte_string->MakeExternal(&one_byte_resource);
+  CHECK(one_byte_string->IsExternalOneByteString());
+  CHECK(one_byte_string->IsInternalizedString());
+
+  // Obtain external internalized two-byte string.
+  SerializerTwoByteResource two_byte_resource("two_byte", 8);
+  Handle<String> two_byte_string =
+      isolate->factory()->NewStringFromAsciiChecked("two_byte");
+  two_byte_string = isolate->factory()->InternalizeString(two_byte_string);
+  two_byte_string->MakeExternal(&two_byte_resource);
+  CHECK(two_byte_string->IsExternalTwoByteString());
+  CHECK(two_byte_string->IsInternalizedString());
+
+  const char* source =
+      "var o = {}               \n"
+      "o.one_byte = 7;          \n"
+      "o.two_byte = 8;          \n"
+      "o.one_byte + o.two_byte; \n";
+  Handle<String> source_string = isolate->factory()
+                                     ->NewStringFromUtf8(CStrVector(source))
+                                     .ToHandleChecked();
+
+  Handle<JSObject> global(isolate->context()->global_object());
+  ScriptData* cache = NULL;
+
+  Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
+      source_string, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, &cache,
+      v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+
+  Handle<SharedFunctionInfo> copy;
+  {
+    DisallowCompilation no_compile_expected(isolate);
+    copy = Compiler::CompileScript(
+        source_string, Handle<String>(), 0, 0, false,
+        Handle<Context>(isolate->native_context()), NULL, &cache,
+        v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+  }
+  CHECK_NE(*orig, *copy);
+
+  Handle<JSFunction> copy_fun =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          copy, isolate->native_context());
+
+  Handle<Object> copy_result =
+      Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+
+  CHECK_EQ(15.0f, copy_result->Number());
+
+  delete cache;
+}
+
+
+TEST(SerializeToplevelLargeExternalString) {
+  FLAG_serialize_toplevel = true;
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.
+
+  Factory* f = isolate->factory();
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  // Create a huge external internalized string to use as variable name.
+  Vector<const uint8_t> string =
+      ConstructSource(STATIC_CHAR_VECTOR(""), STATIC_CHAR_VECTOR("abcdef"),
+                      STATIC_CHAR_VECTOR(""), 1000000);
+  Handle<String> name = f->NewStringFromOneByte(string).ToHandleChecked();
+  SerializerOneByteResource one_byte_resource(
+      reinterpret_cast<const char*>(string.start()), string.length());
+  name = f->InternalizeString(name);
+  name->MakeExternal(&one_byte_resource);
+  CHECK(name->IsExternalOneByteString());
+  CHECK(name->IsInternalizedString());
+  CHECK(isolate->heap()->InSpace(*name, LO_SPACE));
+
+  // Create the source, which is "var <literal> = 42; <literal>".
+  Handle<String> source_str =
+      f->NewConsString(
+             f->NewConsString(f->NewStringFromAsciiChecked("var "), name)
+                 .ToHandleChecked(),
+             f->NewConsString(f->NewStringFromAsciiChecked(" = 42; "), name)
+                 .ToHandleChecked()).ToHandleChecked();
+
+  Handle<JSObject> global(isolate->context()->global_object());
+  ScriptData* cache = NULL;
+
+  Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
+      source_str, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, &cache,
+      v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+
+  Handle<SharedFunctionInfo> copy;
+  {
+    DisallowCompilation no_compile_expected(isolate);
+    copy = Compiler::CompileScript(
+        source_str, Handle<String>(), 0, 0, false,
+        Handle<Context>(isolate->native_context()), NULL, &cache,
+        v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+  }
+  CHECK_NE(*orig, *copy);
+
+  Handle<JSFunction> copy_fun =
+      f->NewFunctionFromSharedFunctionInfo(copy, isolate->native_context());
+
+  Handle<Object> copy_result =
+      Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+
+  CHECK_EQ(42.0f, copy_result->Number());
+
+  delete cache;
+  string.Dispose();
+}
+
+
+TEST(SerializeToplevelExternalScriptName) {
+  FLAG_serialize_toplevel = true;
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.
+
+  Factory* f = isolate->factory();
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  const char* source =
+      "var a = [1, 2, 3, 4];"
+      "a.reduce(function(x, y) { return x + y }, 0)";
+
+  Handle<String> source_string =
+      f->NewStringFromUtf8(CStrVector(source)).ToHandleChecked();
+
+  const SerializerOneByteResource one_byte_resource("one_byte", 8);
+  Handle<String> name =
+      f->NewExternalStringFromOneByte(&one_byte_resource).ToHandleChecked();
+  CHECK(name->IsExternalOneByteString());
+  CHECK(!name->IsInternalizedString());
+
+  Handle<JSObject> global(isolate->context()->global_object());
+  ScriptData* cache = NULL;
+
+  Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
+      source_string, name, 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, &cache,
+      v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+
+  Handle<SharedFunctionInfo> copy;
+  {
+    DisallowCompilation no_compile_expected(isolate);
+    copy = Compiler::CompileScript(
+        source_string, name, 0, 0, false,
+        Handle<Context>(isolate->native_context()), NULL, &cache,
+        v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+  }
+  CHECK_NE(*orig, *copy);
+
+  Handle<JSFunction> copy_fun =
+      f->NewFunctionFromSharedFunctionInfo(copy, isolate->native_context());
+
+  Handle<Object> copy_result =
+      Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+
+  CHECK_EQ(10.0f, copy_result->Number());
+
+  delete cache;
+}
+
+
 TEST(SerializeToplevelIsolates) {
   FLAG_serialize_toplevel = true;
 
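These tests exercise the internal Compiler::CompileScript entry point with
kProduceCodeCache and then kConsumeCodeCache. The embedder-facing shape of
the same produce/consume cycle, sketched against the public ScriptCompiler
API of this era (signatures assumed from include/v8.h; error handling and
context setup omitted):

    #include "include/v8.h"

    // Sketch: produce a code cache on the first compile, reuse it on the
    // second. Assumes 'isolate' has an entered context.
    v8::Local<v8::UnboundScript> CompileTwice(v8::Isolate* isolate,
                                              v8::Local<v8::String> code) {
      v8::ScriptCompiler::Source produce_source(code);
      v8::ScriptCompiler::CompileUnbound(isolate, &produce_source,
                                         v8::ScriptCompiler::kProduceCodeCache);
      const v8::ScriptCompiler::CachedData* cache =
          produce_source.GetCachedData();

      // The second compile consumes the cache instead of re-parsing.
      v8::ScriptCompiler::Source consume_source(
          code,
          new v8::ScriptCompiler::CachedData(cache->data, cache->length));
      return v8::ScriptCompiler::CompileUnbound(
          isolate, &consume_source, v8::ScriptCompiler::kConsumeCodeCache);
    }
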
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 04c0977..aad4573 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -196,8 +196,10 @@
 
   # Skip endian-dependent test for mips due to different typed views of the same
   # array buffer.
-  'nans': [PASS, ['arch == mips', SKIP]],
+  'nans': [PASS, ],
 
+  # This test variant only makes sense on arm.
+  'math-floor-of-div-nosudiv': [PASS, SLOW, ['arch not in [arm, arm64, android_arm, android_arm64]', SKIP]],
 }],  # ALWAYS
 
 ##############################################################################
diff --git a/test/unittests/compiler/graph-unittest.cc b/test/unittests/compiler/graph-unittest.cc
index 5160e9a..27f694a 100644
--- a/test/unittests/compiler/graph-unittest.cc
+++ b/test/unittests/compiler/graph-unittest.cc
@@ -7,6 +7,7 @@
 #include <ostream>  // NOLINT(readability/streams)
 
 #include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
 
 using testing::_;
 using testing::MakeMatcher;
@@ -68,8 +69,16 @@
 }
 
 
+Node* GraphTest::HeapConstant(const Handle<HeapObject>& value) {
+  return HeapConstant(Unique<HeapObject>::CreateUninitialized(value));
+}
+
+
 Node* GraphTest::HeapConstant(const Unique<HeapObject>& value) {
-  return graph()->NewNode(common()->HeapConstant(value));
+  Node* node = graph()->NewNode(common()->HeapConstant(value));
+  Type* type = Type::Constant(value.handle(), zone());
+  NodeProperties::SetBounds(node, Bounds(type));
+  return node;
 }
 
 
@@ -85,6 +94,12 @@
 }
 
 
+Node* GraphTest::UndefinedConstant() {
+  return HeapConstant(
+      Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+}
+
+
 Matcher<Node*> GraphTest::IsFalseConstant() {
   return IsHeapConstant(
       Unique<HeapObject>::CreateImmovable(factory()->false_value()));
@@ -430,6 +445,172 @@
 };
 
 
+class IsLoadFieldMatcher FINAL : public NodeMatcher {
+ public:
+  IsLoadFieldMatcher(const Matcher<FieldAccess>& access_matcher,
+                     const Matcher<Node*>& base_matcher,
+                     const Matcher<Node*>& effect_matcher)
+      : NodeMatcher(IrOpcode::kLoadField),
+        access_matcher_(access_matcher),
+        base_matcher_(base_matcher),
+        effect_matcher_(effect_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose access (";
+    access_matcher_.DescribeTo(os);
+    *os << "), base (";
+    base_matcher_.DescribeTo(os);
+    *os << ") and effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node,
+                               MatchResultListener* listener) const OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<FieldAccess>(node), "access",
+                                 access_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener));
+  }
+
+ private:
+  const Matcher<FieldAccess> access_matcher_;
+  const Matcher<Node*> base_matcher_;
+  const Matcher<Node*> effect_matcher_;
+};
+
+
+class IsLoadElementMatcher FINAL : public NodeMatcher {
+ public:
+  IsLoadElementMatcher(const Matcher<ElementAccess>& access_matcher,
+                       const Matcher<Node*>& base_matcher,
+                       const Matcher<Node*>& index_matcher,
+                       const Matcher<Node*>& length_matcher,
+                       const Matcher<Node*>& effect_matcher,
+                       const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kLoadElement),
+        access_matcher_(access_matcher),
+        base_matcher_(base_matcher),
+        index_matcher_(index_matcher),
+        length_matcher_(length_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose access (";
+    access_matcher_.DescribeTo(os);
+    *os << "), base (";
+    base_matcher_.DescribeTo(os);
+    *os << "), index (";
+    index_matcher_.DescribeTo(os);
+    *os << "), length (";
+    length_matcher_.DescribeTo(os);
+    *os << "), effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node,
+                               MatchResultListener* listener) const OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
+                                 access_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "index", index_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+                                 "length", length_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<ElementAccess> access_matcher_;
+  const Matcher<Node*> base_matcher_;
+  const Matcher<Node*> index_matcher_;
+  const Matcher<Node*> length_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsStoreElementMatcher FINAL : public NodeMatcher {
+ public:
+  IsStoreElementMatcher(const Matcher<ElementAccess>& access_matcher,
+                        const Matcher<Node*>& base_matcher,
+                        const Matcher<Node*>& index_matcher,
+                        const Matcher<Node*>& length_matcher,
+                        const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& effect_matcher,
+                        const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kStoreElement),
+        access_matcher_(access_matcher),
+        base_matcher_(base_matcher),
+        index_matcher_(index_matcher),
+        length_matcher_(length_matcher),
+        value_matcher_(value_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose access (";
+    access_matcher_.DescribeTo(os);
+    *os << "), base (";
+    base_matcher_.DescribeTo(os);
+    *os << "), index (";
+    index_matcher_.DescribeTo(os);
+    *os << "), length (";
+    length_matcher_.DescribeTo(os);
+    *os << "), value (";
+    value_matcher_.DescribeTo(os);
+    *os << "), effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node,
+                               MatchResultListener* listener) const OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
+                                 access_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "index", index_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+                                 "length", length_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<ElementAccess> access_matcher_;
+  const Matcher<Node*> base_matcher_;
+  const Matcher<Node*> index_matcher_;
+  const Matcher<Node*> length_matcher_;
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
 class IsLoadMatcher FINAL : public NodeMatcher {
  public:
   IsLoadMatcher(const Matcher<LoadRepresentation>& rep_matcher,
@@ -715,6 +896,39 @@
 }
 
 
+Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
+                           const Matcher<Node*>& base_matcher,
+                           const Matcher<Node*>& effect_matcher) {
+  return MakeMatcher(
+      new IsLoadFieldMatcher(access_matcher, base_matcher, effect_matcher));
+}
+
+
+Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
+                             const Matcher<Node*>& base_matcher,
+                             const Matcher<Node*>& index_matcher,
+                             const Matcher<Node*>& length_matcher,
+                             const Matcher<Node*>& effect_matcher,
+                             const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsLoadElementMatcher(access_matcher, base_matcher,
+                                              index_matcher, length_matcher,
+                                              effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
+                              const Matcher<Node*>& base_matcher,
+                              const Matcher<Node*>& index_matcher,
+                              const Matcher<Node*>& length_matcher,
+                              const Matcher<Node*>& value_matcher,
+                              const Matcher<Node*>& effect_matcher,
+                              const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsStoreElementMatcher(
+      access_matcher, base_matcher, index_matcher, length_matcher,
+      value_matcher, effect_matcher, control_matcher));
+}
+
+
 Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
                       const Matcher<Node*>& base_matcher,
                       const Matcher<Node*>& index_matcher,
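
All of the matchers above follow the standard gmock extension recipe that
NodeMatcher wraps: implement MatcherInterface<T>'s DescribeTo and
MatchAndExplain, then hand an instance to MakeMatcher. Stripped of the
node-property plumbing, the recipe looks like this (a generic sketch, not
the V8 classes):

    #include "gmock/gmock.h"

    // Sketch: a custom matcher built on the same gmock machinery.
    class IsEvenMatcher : public ::testing::MatcherInterface<int> {
     public:
      virtual void DescribeTo(std::ostream* os) const { *os << "is even"; }
      virtual bool MatchAndExplain(
          int n, ::testing::MatchResultListener* listener) const {
        *listener << "remainder is " << (n % 2);  // explain mismatches
        return n % 2 == 0;
      }
    };

    ::testing::Matcher<int> IsEven() {
      return ::testing::MakeMatcher(new IsEvenMatcher());
    }

    // Usage: EXPECT_THAT(some_int, IsEven());
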
diff --git a/test/unittests/compiler/graph-unittest.h b/test/unittests/compiler/graph-unittest.h
index 18d3ca5..41a91f8 100644
--- a/test/unittests/compiler/graph-unittest.h
+++ b/test/unittests/compiler/graph-unittest.h
@@ -15,12 +15,19 @@
 namespace internal {
 
 // Forward declarations.
+template <class T>
+class Handle;
 class HeapObject;
 template <class T>
 class Unique;
 
 namespace compiler {
 
+// Forward declarations.
+struct ElementAccess;
+struct FieldAccess;
+
+
 using ::testing::Matcher;
 
 
@@ -36,9 +43,11 @@
   Node* Int32Constant(int32_t value);
   Node* Int64Constant(int64_t value);
   Node* NumberConstant(volatile double value);
+  Node* HeapConstant(const Handle<HeapObject>& value);
   Node* HeapConstant(const Unique<HeapObject>& value);
   Node* FalseConstant();
   Node* TrueConstant();
+  Node* UndefinedConstant();
 
   Matcher<Node*> IsFalseConstant();
   Matcher<Node*> IsTrueConstant();
@@ -88,6 +97,22 @@
                                 const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsNumberSubtract(const Matcher<Node*>& lhs_matcher,
                                 const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
+                           const Matcher<Node*>& base_matcher,
+                           const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
+                             const Matcher<Node*>& base_matcher,
+                             const Matcher<Node*>& index_matcher,
+                             const Matcher<Node*>& length_matcher,
+                             const Matcher<Node*>& effect_matcher,
+                             const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
+                              const Matcher<Node*>& base_matcher,
+                              const Matcher<Node*>& index_matcher,
+                              const Matcher<Node*>& length_matcher,
+                              const Matcher<Node*>& value_matcher,
+                              const Matcher<Node*>& effect_matcher,
+                              const Matcher<Node*>& control_matcher);
 
 Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
                       const Matcher<Node*>& base_matcher,
diff --git a/test/unittests/compiler/js-builtin-reducer-unittest.cc b/test/unittests/compiler/js-builtin-reducer-unittest.cc
index c72978f..541dd09 100644
--- a/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -35,11 +35,6 @@
     return n;
   }
 
-  Node* UndefinedConstant() {
-    return HeapConstant(
-        Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
-  }
-
   JSOperatorBuilder* javascript() { return &javascript_; }
 
  private:
diff --git a/test/unittests/compiler/js-typed-lowering-unittest.cc b/test/unittests/compiler/js-typed-lowering-unittest.cc
new file mode 100644
index 0000000..c9b0830
--- /dev/null
+++ b/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -0,0 +1,151 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+const ExternalArrayType kExternalArrayTypes[] = {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) kExternal##Type##Array,
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+};
+
+
+const StrictMode kStrictModes[] = {SLOPPY, STRICT};
+
+}  // namespace
+
+
+class JSTypedLoweringTest : public GraphTest {
+ public:
+  JSTypedLoweringTest() : GraphTest(3), javascript_(zone()) {}
+  virtual ~JSTypedLoweringTest() {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    MachineOperatorBuilder machine;
+    JSGraph jsgraph(graph(), common(), javascript(), &typer, &machine);
+    JSTypedLowering reducer(&jsgraph);
+    return reducer.Reduce(node);
+  }
+
+  Node* Parameter(Type* type, int index = 0) {
+    Node* node = graph()->NewNode(common()->Parameter(index), graph()->start());
+    NodeProperties::SetBounds(node, Bounds(Type::None(), type));
+    return node;
+  }
+
+  Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
+    Handle<JSArrayBuffer> buffer = factory()->NewJSArrayBuffer();
+    Runtime::SetupArrayBuffer(isolate(), buffer, true, bytes, byte_length);
+    return buffer;
+  }
+
+  JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+  JSOperatorBuilder javascript_;
+};
+
+
+// -----------------------------------------------------------------------------
+// JSLoadProperty
+
+
+TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
+  const size_t kLength = 17;
+  uint8_t backing_store[kLength * 8];
+  Handle<JSArrayBuffer> buffer =
+      NewArrayBuffer(backing_store, arraysize(backing_store));
+  TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
+    Handle<JSTypedArray> array =
+        factory()->NewJSTypedArray(type, buffer, kLength);
+
+    Node* key = Parameter(Type::Integral32());
+    Node* context = UndefinedConstant();
+    Node* effect = graph()->start();
+    Node* control = graph()->start();
+    Node* node = graph()->NewNode(javascript()->LoadProperty(),
+                                  HeapConstant(array), key, context);
+    if (FLAG_turbo_deoptimization) {
+      node->AppendInput(zone(), UndefinedConstant());
+    }
+    node->AppendInput(zone(), effect);
+    node->AppendInput(zone(), control);
+    Reduction r = Reduce(node);
+
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(
+        r.replacement(),
+        IsLoadElement(
+            AccessBuilder::ForTypedArrayElement(type, true),
+            IsLoadField(
+                AccessBuilder::ForJSArrayBufferBackingStore(),
+                IsHeapConstant(Unique<HeapObject>::CreateImmovable(buffer)),
+                effect),
+            key, IsInt32Constant(static_cast<int>(kLength)), effect, control));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// JSStoreProperty
+
+
+TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
+  const size_t kLength = 17;
+  uint8_t backing_store[kLength * 8];
+  Handle<JSArrayBuffer> buffer =
+      NewArrayBuffer(backing_store, arraysize(backing_store));
+  TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
+    TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+      Handle<JSTypedArray> array =
+          factory()->NewJSTypedArray(type, buffer, kLength);
+
+      Node* key = Parameter(Type::Integral32());
+      Node* value = Parameter(Type::Any());
+      Node* context = UndefinedConstant();
+      Node* effect = graph()->start();
+      Node* control = graph()->start();
+      Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
+                                    HeapConstant(array), key, value, context);
+      if (FLAG_turbo_deoptimization) {
+        node->AppendInput(zone(), UndefinedConstant());
+      }
+      node->AppendInput(zone(), effect);
+      node->AppendInput(zone(), control);
+      Reduction r = Reduce(node);
+
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(
+          r.replacement(),
+          IsStoreElement(
+              AccessBuilder::ForTypedArrayElement(type, true),
+              IsLoadField(
+                  AccessBuilder::ForJSArrayBufferBackingStore(),
+                  IsHeapConstant(Unique<HeapObject>::CreateImmovable(buffer)),
+                  effect),
+              key, IsInt32Constant(static_cast<int>(kLength)), value, effect,
+              control));
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
new file mode 100644
index 0000000..f5891d2
--- /dev/null
+++ b/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -0,0 +1,807 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+template <typename T>
+struct MachInst {
+  T constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;
+  MachineType machine_type;
+};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+  return os << mi.constructor_name;
+}
+
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+
+// The IntCmp helper structure avoids duplicated code: it pairs a MachInst2
+// (an instruction taking two nodes) with an expected_size, because different
+// cmp instructions expand to different numbers of machine instructions.
+struct IntCmp {
+  MachInst2 mi;
+  uint32_t expected_size;
+};
+
+struct FPCmp {
+  MachInst2 mi;
+  FlagsCondition cond;
+};
+
+const FPCmp kFPCmpInstructions[] = {
+    {{&RawMachineAssembler::Float64Equal, "Float64Equal", kMipsCmpD,
+      kMachFloat64},
+     kUnorderedEqual},
+    {{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMipsCmpD,
+      kMachFloat64},
+     kUnorderedLessThan},
+    {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+      kMipsCmpD, kMachFloat64},
+     kUnorderedLessThanOrEqual},
+    {{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan", kMipsCmpD,
+      kMachFloat64},
+     kUnorderedLessThan},
+    {{&RawMachineAssembler::Float64GreaterThanOrEqual,
+      "Float64GreaterThanOrEqual", kMipsCmpD, kMachFloat64},
+     kUnorderedLessThanOrEqual}};
+
+struct Conversion {
+  // The machine_type field in MachInst1 represents the destination type.
+  MachInst1 mi;
+  MachineType src_machine_type;
+};
+
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kLogicalInstructions[] = {
+    {&RawMachineAssembler::WordAnd, "WordAnd", kMipsAnd, kMachInt16},
+    {&RawMachineAssembler::WordOr, "WordOr", kMipsOr, kMachInt16},
+    {&RawMachineAssembler::WordXor, "WordXor", kMipsXor, kMachInt16},
+    {&RawMachineAssembler::Word32And, "Word32And", kMipsAnd, kMachInt32},
+    {&RawMachineAssembler::Word32Or, "Word32Or", kMipsOr, kMachInt32},
+    {&RawMachineAssembler::Word32Xor, "Word32Xor", kMipsXor, kMachInt32}};
+
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kShiftInstructions[] = {
+    {&RawMachineAssembler::WordShl, "WordShl", kMipsShl, kMachInt16},
+    {&RawMachineAssembler::WordShr, "WordShr", kMipsShr, kMachInt16},
+    {&RawMachineAssembler::WordSar, "WordSar", kMipsSar, kMachInt16},
+    {&RawMachineAssembler::WordRor, "WordRor", kMipsRor, kMachInt16},
+    {&RawMachineAssembler::Word32Shl, "Word32Shl", kMipsShl, kMachInt32},
+    {&RawMachineAssembler::Word32Shr, "Word32Shr", kMipsShr, kMachInt32},
+    {&RawMachineAssembler::Word32Sar, "Word32Sar", kMipsSar, kMachInt32},
+    {&RawMachineAssembler::Word32Ror, "Word32Ror", kMipsRor, kMachInt32}};
+
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kMulDivInstructions[] = {
+    {&RawMachineAssembler::Int32Mul, "Int32Mul", kMipsMul, kMachInt32},
+    {&RawMachineAssembler::Int32Div, "Int32Div", kMipsDiv, kMachInt32},
+    {&RawMachineAssembler::Uint32Div, "Uint32Div", kMipsDivU, kMachUint32},
+    {&RawMachineAssembler::Float64Mul, "Float64Mul", kMipsMulD, kMachFloat64},
+    {&RawMachineAssembler::Float64Div, "Float64Div", kMipsDivD, kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kModInstructions[] = {
+    {&RawMachineAssembler::Int32Mod, "Int32Mod", kMipsMod, kMachInt32},
+    {&RawMachineAssembler::Uint32Mod, "Int32UMod", kMipsModU, kMachInt32},
+    {&RawMachineAssembler::Float64Mod, "Float64Mod", kMipsModD, kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// Arithmetic FPU instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kFPArithInstructions[] = {
+    {&RawMachineAssembler::Float64Add, "Float64Add", kMipsAddD, kMachFloat64},
+    {&RawMachineAssembler::Float64Sub, "Float64Sub", kMipsSubD, kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, two nodes.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kAddSubInstructions[] = {
+    {&RawMachineAssembler::Int32Add, "Int32Add", kMipsAdd, kMachInt32},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub", kMipsSub, kMachInt32},
+    {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
+     kMipsAddOvf, kMachInt32},
+    {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
+     kMipsSubOvf, kMachInt32}};
+
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, one node.
+// ----------------------------------------------------------------------------
+
+
+const MachInst1 kAddSubOneInstructions[] = {
+    {&RawMachineAssembler::Int32Neg, "Int32Neg", kMipsSub, kMachInt32},
+    // TODO(dusmil): check this ...
+    // {&RawMachineAssembler::WordEqual  , "WordEqual"  , kMipsTst, kMachInt32}
+};
+
+
+// ----------------------------------------------------------------------------
+// Arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+
+const IntCmp kCmpInstructions[] = {
+    {{&RawMachineAssembler::WordEqual, "WordEqual", kMipsCmp, kMachInt16}, 1U},
+    {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMipsCmp, kMachInt16},
+     2U},
+    {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMipsCmp, kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMipsCmp,
+      kMachInt32},
+     2U},
+    {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMipsCmp,
+      kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+      kMipsCmp, kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMipsCmp,
+      kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+      kMipsCmp, kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMipsCmp,
+      kMachUint32},
+     1U},
+    {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+      kMipsCmp, kMachUint32},
+     1U}};
+
+
+// ----------------------------------------------------------------------------
+// Conversion instructions.
+// ----------------------------------------------------------------------------
+
+const Conversion kConversionInstructions[] = {
+    // These entries exercise the FPU conversion operators declared in
+    // machine_operator.h, which convert the representation of integers
+    // between float64 and int32/uint32. The precise rounding mode and the
+    // handling of out-of-range inputs are *not* defined for these operators,
+    // since they are intended only for use with integers.
+    // mips instruction: cvt_d_w
+    {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+      kMipsCvtDW, kMachFloat64},
+     kMachInt32},
+
+    // mips instruction: cvt_d_uw
+    {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+      kMipsCvtDUw, kMachFloat64},
+     kMachUint32},
+
+    // mips instruction: trunc_w_d
+    {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+      kMipsTruncWD, kMachInt32},
+     kMachFloat64},
+
+    // mips instruction: trunc_uw_d
+    {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+      kMipsTruncUwD, kMachUint32},
+     kMachFloat64}};
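+
+// For reference (MIPS ISA background, not derived from the selector):
+// cvt.d.w converts a word held in an FPU register to double precision, and
+// trunc.w.d converts a double to a word rounding toward zero; the unsigned
+// variants referenced above are typically assembler macros built on top of
+// these.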
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
+
+
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+  const FPCmp cmp = GetParam();
+  StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+  m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
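+
+// A note on the pattern used throughout this file: StreamBuilder assembles a
+// small graph via RawMachineAssembler, Build() runs just the instruction
+// selector over it, and the resulting Stream of selected instructions is
+// inspected for opcodes, operand counts, and flags.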
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+                        ::testing::ValuesIn(kFPCmpInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<IntCmp> InstructionSelectorCmpTest;
+
+
+TEST_P(InstructionSelectorCmpTest, Parameter) {
+  const IntCmp cmp = GetParam();
+  const MachineType type = cmp.mi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(cmp.expected_size, s.size());
+  EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
+                        ::testing::ValuesIn(kCmpInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorShiftTest;
+
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
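+
+// MIPS shift instructions encode the shift amount in a 5-bit field, so every
+// immediate exercised above (at most 0..31) can be selected directly as an
+// immediate operand instead of being moved into a register first.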
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+                        ::testing::ValuesIn(kShiftInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorLogicalTest;
+
+
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
+                        ::testing::ValuesIn(kLogicalInstructions));
+
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorMulDivTest;
+
+
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+                        ::testing::ValuesIn(kMulDivInstructions));
+
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2> InstructionSelectorModTest;
+
+
+TEST_P(InstructionSelectorModTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorModTest,
+                        ::testing::ValuesIn(kModInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Floating point instructions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorFPArithTest;
+
+
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+  const MachInst2 fpa = GetParam();
+  StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+  m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
+                        ::testing::ValuesIn(kFPArithInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic instructions, two input nodes.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorIntArithTwoTest;
+
+
+TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
+  const MachInst2 intpa = GetParam();
+  StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+                  intpa.machine_type);
+  m.Return((m.*intpa.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorIntArithTwoTest,
+                        ::testing::ValuesIn(kAddSubInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic instructions, one input node.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst1>
+    InstructionSelectorIntArithOneTest;
+
+
+TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
+  const MachInst1 intpa = GetParam();
+  StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+                  intpa.machine_type);
+  m.Return((m.*intpa.constructor)(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorIntArithOneTest,
+                        ::testing::ValuesIn(kAddSubOneInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Conversions.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<Conversion>
+    InstructionSelectorConversionTest;
+
+
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+  const Conversion conv = GetParam();
+  StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+  m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorConversionTest,
+                        ::testing::ValuesIn(kConversionInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Loads and stores.
+// ----------------------------------------------------------------------------
+
+namespace {
+
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+};
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8, kMipsLb, kMipsSb},
+    {kMachUint8, kMipsLbu, kMipsSb},
+    {kMachInt16, kMipsLh, kMipsSh},
+    {kMachUint16, kMipsLhu, kMipsSh},
+    {kMachInt32, kMipsLw, kMipsSw},
+    {kMachFloat32, kMipsLwc1, kMipsSwc1},
+    {kMachFloat64, kMipsLdc1, kMipsSdc1}};
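+
+// lb/lh sign-extend the loaded value while lbu/lhu zero-extend it, which is
+// why the signed and unsigned machine types above select different load
+// opcodes; stores need no such distinction, so sb and sh are shared.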
+
+
+struct MemoryAccessImm {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+  bool (InstructionSelectorTest::Stream::*val_predicate)(
+      const InstructionOperand*) const;
+  const int32_t immediates[40];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm& acc) {
+  return os << acc.type;
+}
+
+
+struct MemoryAccessImm1 {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+  bool (InstructionSelectorTest::Stream::*val_predicate)(
+      const InstructionOperand*) const;
+  const int32_t immediates[5];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
+  return os << acc.type;
+}
+
+
+// ----------------------------------------------------------------------------
+// Loads and stores with an immediate index.
+// ----------------------------------------------------------------------------
+
+
+const MemoryAccessImm kMemoryAccessesImm[] = {
+    {kMachInt8,
+     kMipsLb,
+     kMipsSb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachUint8,
+     kMipsLbu,
+     kMipsSb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachInt16,
+     kMipsLh,
+     kMipsSh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachUint16,
+     kMipsLhu,
+     kMipsSh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachInt32,
+     kMipsLw,
+     kMipsSw,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachFloat32,
+     kMipsLwc1,
+     kMipsSwc1,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachFloat64,
+     kMipsLdc1,
+     kMipsSdc1,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
+
+
+const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
+    {kMachInt8,
+     kMipsLb,
+     kMipsSb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachUint8,
+     kMipsLbu,
+     kMipsSb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachInt16,
+     kMipsLh,
+     kMipsSh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachUint16,
+     kMipsLhu,
+     kMipsSh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachInt32,
+     kMipsLw,
+     kMipsSw,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachFloat32,
+     kMipsLwc1,
+     kMipsSwc1,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachFloat64,
+     kMipsLdc1,
+     kMipsSdc1,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-65000, -55000, 32777, 55000, 65000}}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+
+// ----------------------------------------------------------------------------
+// Load with an immediate index.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccessImm>
+    InstructionSelectorMemoryAccessImmTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
+  const MemoryAccessImm memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+  }
+}
+
+
+// ----------------------------------------------------------------------------
+// Store with an immediate index.
+// ----------------------------------------------------------------------------
+
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
+  const MemoryAccessImm memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessImmTest,
+                        ::testing::ValuesIn(kMemoryAccessesImm));
+
+
+// ----------------------------------------------------------------------------
+// Load/store offsets more than 16 bits.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccessImm1>
+    InstructionSelectorMemoryAccessImmMoreThan16bitTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+       LoadWithImmediateIndex) {
+  const MemoryAccessImm1 memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(2U, s.size());
+    // kMipsAdd is the expected first opcode: the offset is wider than the
+    // signed 16-bit immediate field (see the sketch after this test).
+    EXPECT_EQ(kMipsAdd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
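+
+// A rough sketch of the assumed lowering when the offset exceeds the signed
+// 16-bit immediate field of the MIPS load/store encodings (not taken
+// verbatim from the selector):
+//   addu at, base, offset  // kMipsAdd folds the large offset into the base
+//   lw   dest, 0(at)       // the access itself needs no displacement
+// This is why the load and store tests here check for a two-entry stream
+// with kMipsAdd first.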
+
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+       StoreWithImmediateIndex) {
+  const MemoryAccessImm1 memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(2U, s.size());
+    // As in the load test, kMipsAdd comes first because the offset is wider
+    // than the signed 16-bit immediate field.
+    EXPECT_EQ(kMipsAdd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+                        ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
+
+
+// ----------------------------------------------------------------------------
+// kMipsTst testing.
+// ----------------------------------------------------------------------------
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kMipsTst, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kMipsTst, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
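+
+// Comparing against the zero constant needs no second operand: the selector
+// is assumed to rewrite Word32Equal(x, 0) into a kMipsTst of the value
+// against itself, which is what the matching-vreg checks above assert.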
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/test/unittests/heap/gc-idle-time-handler-unittest.cc b/test/unittests/heap/gc-idle-time-handler-unittest.cc
index b4f2f74..0b0d40c 100644
--- a/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -29,7 +29,7 @@
     result.mark_compact_speed_in_bytes_per_ms = kMarkCompactSpeed;
     result.incremental_marking_speed_in_bytes_per_ms = kMarkingSpeed;
     result.scavenge_speed_in_bytes_per_ms = kScavengeSpeed;
-    result.available_new_space_memory = kNewSpaceCapacity;
+    result.used_new_space_size = 0;
     result.new_space_capacity = kNewSpaceCapacity;
     result.new_space_allocation_throughput_in_bytes_per_ms =
         kNewSpaceAllocationThroughput;
@@ -109,38 +109,60 @@
 }
 
 
-TEST(GCIdleTimeHandler, EstimateScavengeTimeInitial) {
-  size_t size = 1 * MB;
-  size_t time = GCIdleTimeHandler::EstimateScavengeTime(size, 0);
-  EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeScavengeSpeed, time);
+TEST_F(GCIdleTimeHandlerTest, DoScavengeEmptyNewSpace) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_in_ms = 16;
+  EXPECT_FALSE(GCIdleTimeHandler::DoScavenge(
+      idle_time_in_ms, heap_state.new_space_capacity,
+      heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+      heap_state.new_space_allocation_throughput_in_bytes_per_ms));
 }
 
 
-TEST(GCIdleTimeHandler, EstimateScavengeTimeNonZero) {
-  size_t size = 1 * MB;
-  size_t speed = 1 * MB;
-  size_t time = GCIdleTimeHandler::EstimateScavengeTime(size, speed);
-  EXPECT_EQ(size / speed, time);
+TEST_F(GCIdleTimeHandlerTest, DoScavengeFullNewSpace) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.used_new_space_size = kNewSpaceCapacity;
+  int idle_time_in_ms = 16;
+  EXPECT_TRUE(GCIdleTimeHandler::DoScavenge(
+      idle_time_in_ms, heap_state.new_space_capacity,
+      heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+      heap_state.new_space_allocation_throughput_in_bytes_per_ms));
 }
 
 
-TEST(GCIdleTimeHandler, ScavangeMayHappenSoonInitial) {
-  size_t available = 100 * KB;
-  EXPECT_FALSE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, 0));
+TEST_F(GCIdleTimeHandlerTest, DoScavengeUnknownScavengeSpeed) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.used_new_space_size = kNewSpaceCapacity;
+  heap_state.scavenge_speed_in_bytes_per_ms = 0;
+  int idle_time_in_ms = 16;
+  EXPECT_FALSE(GCIdleTimeHandler::DoScavenge(
+      idle_time_in_ms, heap_state.new_space_capacity,
+      heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+      heap_state.new_space_allocation_throughput_in_bytes_per_ms));
 }
 
 
-TEST(GCIdleTimeHandler, ScavangeMayHappenSoonNonZeroFalse) {
-  size_t available = (GCIdleTimeHandler::kMaxFrameRenderingIdleTime + 1) * KB;
-  size_t speed = 1 * KB;
-  EXPECT_FALSE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, speed));
+TEST_F(GCIdleTimeHandlerTest, DoScavengeLowScavengeSpeed) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.used_new_space_size = kNewSpaceCapacity;
+  heap_state.scavenge_speed_in_bytes_per_ms = 1 * KB;
+  int idle_time_in_ms = 16;
+  EXPECT_FALSE(GCIdleTimeHandler::DoScavenge(
+      idle_time_in_ms, heap_state.new_space_capacity,
+      heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+      heap_state.new_space_allocation_throughput_in_bytes_per_ms));
 }
 
 
-TEST(GCIdleTimeHandler, ScavangeMayHappenSoonNonZeroTrue) {
-  size_t available = GCIdleTimeHandler::kMaxFrameRenderingIdleTime * KB;
-  size_t speed = 1 * KB;
-  EXPECT_TRUE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, speed));
+TEST_F(GCIdleTimeHandlerTest, DoScavengeHighScavengeSpeed) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.used_new_space_size = kNewSpaceCapacity;
+  heap_state.scavenge_speed_in_bytes_per_ms = kNewSpaceCapacity;
+  int idle_time_in_ms = 16;
+  EXPECT_TRUE(GCIdleTimeHandler::DoScavenge(
+      idle_time_in_ms, heap_state.new_space_capacity,
+      heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
+      heap_state.new_space_allocation_throughput_in_bytes_per_ms));
 }
 
 
@@ -294,8 +316,9 @@
 TEST_F(GCIdleTimeHandlerTest, Scavenge) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   int idle_time_ms = 10;
-  heap_state.available_new_space_memory =
-      kNewSpaceAllocationThroughput * idle_time_ms;
+  heap_state.used_new_space_size =
+      heap_state.new_space_capacity -
+      (kNewSpaceAllocationThroughput * idle_time_ms);
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_SCAVENGE, action.type);
 }
@@ -306,11 +329,12 @@
   int idle_time_ms = 10;
   heap_state.can_start_incremental_marking = false;
   heap_state.incremental_marking_stopped = true;
-  heap_state.available_new_space_memory =
-      kNewSpaceAllocationThroughput * idle_time_ms;
+  heap_state.used_new_space_size =
+      heap_state.new_space_capacity -
+      (kNewSpaceAllocationThroughput * idle_time_ms);
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_SCAVENGE, action.type);
-  heap_state.available_new_space_memory = kNewSpaceCapacity;
+  heap_state.used_new_space_size = 0;
   action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_NOTHING, action.type);
 }
diff --git a/test/unittests/test-utils.cc b/test/unittests/test-utils.cc
index a3532dd..7fe0321 100644
--- a/test/unittests/test-utils.cc
+++ b/test/unittests/test-utils.cc
@@ -8,6 +8,32 @@
 
 namespace v8 {
 
+std::ostream& operator<<(std::ostream& os, ExternalArrayType type) {
+  switch (type) {
+    case kExternalInt8Array:
+      return os << "ExternalInt8Array";
+    case kExternalUint8Array:
+      return os << "ExternalUint8Array";
+    case kExternalInt16Array:
+      return os << "ExternalInt16Array";
+    case kExternalUint16Array:
+      return os << "ExternalUint16Array";
+    case kExternalInt32Array:
+      return os << "ExternalInt32Array";
+    case kExternalUint32Array:
+      return os << "ExternalUint32Array";
+    case kExternalFloat32Array:
+      return os << "ExternalFloat32Array";
+    case kExternalFloat64Array:
+      return os << "ExternalFloat64Array";
+    case kExternalUint8ClampedArray:
+      return os << "ExternalUint8ClampedArray";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
 // static
 Isolate* TestWithIsolate::isolate_ = NULL;
 
diff --git a/test/unittests/test-utils.h b/test/unittests/test-utils.h
index e08974a..7fb94f3 100644
--- a/test/unittests/test-utils.h
+++ b/test/unittests/test-utils.h
@@ -12,6 +12,9 @@
 
 namespace v8 {
 
+std::ostream& operator<<(std::ostream&, ExternalArrayType);
+
+
 class TestWithIsolate : public ::testing::Test {
  public:
   TestWithIsolate();
diff --git a/test/unittests/unittests.gyp b/test/unittests/unittests.gyp
index be34c83..09634d2 100644
--- a/test/unittests/unittests.gyp
+++ b/test/unittests/unittests.gyp
@@ -44,6 +44,7 @@
         'compiler/instruction-selector-unittest.h',
         'compiler/js-builtin-reducer-unittest.cc',
         'compiler/js-operator-unittest.cc',
+        'compiler/js-typed-lowering-unittest.cc',
         'compiler/machine-operator-reducer-unittest.cc',
         'compiler/machine-operator-unittest.cc',
         'compiler/simplified-operator-reducer-unittest.cc',
@@ -73,6 +74,11 @@
             'compiler/ia32/instruction-selector-ia32-unittest.cc',
           ],
         }],
+        ['v8_target_arch=="mipsel"', {
+          'sources': [  ### gcmole(arch:mipsel) ###
+            'compiler/mips/instruction-selector-mips-unittest.cc',
+          ],
+        }],
         ['v8_target_arch=="x64"', {
           'sources': [  ### gcmole(arch:x64) ###
             'compiler/x64/instruction-selector-x64-unittest.cc',
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 77c8929..916fd28 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -1042,6 +1042,10 @@
             '../../src/mips/regexp-macro-assembler-mips.cc',
             '../../src/mips/regexp-macro-assembler-mips.h',
             '../../src/mips/simulator-mips.cc',
+            '../../src/compiler/mips/code-generator-mips.cc',
+            '../../src/compiler/mips/instruction-codes-mips.h',
+            '../../src/compiler/mips/instruction-selector-mips.cc',
+            '../../src/compiler/mips/linkage-mips.cc',
             '../../src/ic/mips/access-compiler-mips.cc',
             '../../src/ic/mips/handler-compiler-mips.cc',
             '../../src/ic/mips/ic-mips.cc',