Version 3.30.10 (based on bleeding_edge revision r24593)

Squeeze the layout of various AST node types (Chromium issue 417697).

Performance and stability improvements on all platforms.

git-svn-id: https://v8.googlecode.com/svn/trunk@24612 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
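
Why the field reordering below shrinks the AST nodes (a hypothetical, standalone C++ sketch, not V8 code): grouping the one-byte flags ahead of the pointer-sized members lets them share a single alignment slot instead of each flag being padded out to pointer width, which is what the reordered constructors and declarations in src/ast.h achieve.

#include <cstdio>

struct Interleaved {   // flags interleaved with pointers, as before the change
  void* a;
  bool f1;             // padded up to pointer alignment
  void* b;
  bool f2;             // padded again
};

struct Grouped {       // flags grouped in front, as in the reordered AST nodes
  bool f1;
  bool f2;             // both flags share one alignment slot
  void* a;
  void* b;
};

int main() {
  // Typically prints "32 vs 24" on a 64-bit build.
  std::printf("%zu vs %zu\n", sizeof(Interleaved), sizeof(Grouped));
  return 0;
}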
diff --git a/ChangeLog b/ChangeLog
index 0111e05..8971304 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,10 @@
+2014-10-15: Version 3.30.10
+
+        Squeeze the layout of various AST node types (Chromium issue 417697).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-10-14: Version 3.30.9
 
         Performance and stability improvements on all platforms.
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index b3da1d6..4546511 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1571,11 +1571,27 @@
 }
 
 
-void Assembler::mul(Register dst, Register src1, Register src2,
-                    SBit s, Condition cond) {
+void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
+                    Condition cond) {
   DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
   // dst goes in bits 16-19 for this instruction!
-  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
+  emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
+                      Condition cond) {
+  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+  emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
+       srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
+}
+
+
+void Assembler::smmul(Register dst, Register src1, Register src2,
+                      Condition cond) {
+  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
+       src2.code() * B8 | B4 | src1.code());
 }
 
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index e0c3a97..f78cc50 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -975,6 +975,11 @@
   void mul(Register dst, Register src1, Register src2,
            SBit s = LeaveCC, Condition cond = al);
 
+  void smmla(Register dst, Register src1, Register src2, Register srcA,
+             Condition cond = al);
+
+  void smmul(Register dst, Register src1, Register src2, Condition cond = al);
+
   void smlal(Register dstL, Register dstH, Register src1, Register src2,
              SBit s = LeaveCC, Condition cond = al);
 
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 85977b1..2af6112 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1096,6 +1096,17 @@
       break;
     }
     case db_x: {
+      if (instr->Bits(22, 20) == 0x5) {
+        if (instr->Bits(7, 4) == 0x1) {
+          if (instr->Bits(15, 12) == 0xF) {
+            Format(instr, "smmul'cond 'rn, 'rm, 'rs");
+          } else {
+            // SMMLA (in V8 notation matching ARM ISA format)
+            Format(instr, "smmla'cond 'rn, 'rm, 'rs, 'rd");
+          }
+          break;
+        }
+      }
       if (FLAG_enable_sudiv) {
         if (instr->Bits(5, 4) == 0x1) {
           if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 88422ef..9294a8c 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -4071,21 +4071,22 @@
   DCHECK(!dividend.is(ip));
   DCHECK(!result.is(ip));
   base::MagicNumbersForDivision<uint32_t> mag =
-      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+      base::SignedDivisionByConstant(bit_cast<uint32_t>(divisor));
   mov(ip, Operand(mag.multiplier));
-  smull(ip, result, dividend, ip);
-  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  bool neg = (mag.multiplier & (1U << 31)) != 0;
   if (divisor > 0 && neg) {
-    add(result, result, Operand(dividend));
-  }
-  if (divisor < 0 && !neg && mag.multiplier > 0) {
-    sub(result, result, Operand(dividend));
+    smmla(result, dividend, ip, dividend);
+  } else {
+    smmul(result, dividend, ip);
+    if (divisor < 0 && !neg && mag.multiplier > 0) {
+      sub(result, result, Operand(dividend));
+    }
   }
   if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
   add(result, result, Operand(dividend, LSR, 31));
 }
 
-
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_ARM
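
The rewritten TruncatingDiv sequence can be cross-checked on the host with a standalone sketch like the one below. The divisor-7 constants (multiplier 0x92492493, shift 2) are the standard Hacker's Delight magic values, not taken from this patch, and MulHigh is a local model of what SMMUL computes.

#include <cassert>
#include <cstdint>

// Host-side model of ARM SMMUL: the high 32 bits of the signed 64-bit product.
static int32_t MulHigh(int32_t a, int32_t b) {
  return static_cast<int32_t>((static_cast<int64_t>(a) * b) >> 32);
}

// Truncating division by 7, mirroring the new smmul/smmla sequence above.
static int32_t DivBy7(int32_t n) {
  const int32_t multiplier = static_cast<int32_t>(0x92492493u);
  const int shift = 2;
  // The multiplier is negative and the divisor positive, so the dividend is
  // added back -- the branch that now uses smmla(result, dividend, ip, dividend).
  int32_t q = MulHigh(n, multiplier) + n;
  q >>= shift;                                                      // ASR by mag.shift
  return q + static_cast<int32_t>(static_cast<uint32_t>(n) >> 31);  // add the sign bit
}

int main() {
  for (int32_t n = -1000; n <= 1000; ++n) assert(DivBy7(n) == n / 7);
  return 0;
}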
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 0444025..5105f1e 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2710,6 +2710,27 @@
       break;
     }
     case db_x: {
+      if (instr->Bits(22, 20) == 0x5) {
+        if (instr->Bits(7, 4) == 0x1) {
+          int rm = instr->RmValue();
+          int32_t rm_val = get_register(rm);
+          int rs = instr->RsValue();
+          int32_t rs_val = get_register(rs);
+          if (instr->Bits(15, 12) == 0xF) {
+            // SMMUL (in V8 notation matching ARM ISA format)
+            // Format(instr, "smmul'cond 'rn, 'rm, 'rs");
+            rn_val = base::bits::SignedMulHigh32(rm_val, rs_val);
+          } else {
+            // SMMLA (in V8 notation matching ARM ISA format)
+            // Format(instr, "smmla'cond 'rn, 'rm, 'rs, 'rd");
+            int rd = instr->RdValue();
+            int32_t rd_val = get_register(rd);
+            rn_val = base::bits::SignedMulHighAndAdd32(rm_val, rs_val, rd_val);
+          }
+          set_register(rn, rn_val);
+          return;
+        }
+      }
       if (FLAG_enable_sudiv) {
         if (instr->Bits(5, 4) == 0x1) {
           if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
diff --git a/src/ast.cc b/src/ast.cc
index 60db913..5f9277b 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -62,12 +62,12 @@
 VariableProxy::VariableProxy(Zone* zone, Variable* var, int position,
                              IdGen* id_gen)
     : Expression(zone, position, 0, id_gen),
-      raw_name_(var->raw_name()),
-      interface_(var->interface()),
-      variable_feedback_slot_(FeedbackVectorSlot::Invalid()),
       is_this_(var->is_this()),
       is_assigned_(false),
-      is_resolved_(false) {
+      is_resolved_(false),
+      variable_feedback_slot_(FeedbackVectorSlot::Invalid()),
+      raw_name_(var->raw_name()),
+      interface_(var->interface()) {
   BindTo(var);
 }
 
@@ -75,12 +75,12 @@
 VariableProxy::VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
                              Interface* interface, int position, IdGen* id_gen)
     : Expression(zone, position, 0, id_gen),
-      raw_name_(name),
-      interface_(interface),
-      variable_feedback_slot_(FeedbackVectorSlot::Invalid()),
       is_this_(is_this),
       is_assigned_(false),
-      is_resolved_(false) {}
+      is_resolved_(false),
+      variable_feedback_slot_(FeedbackVectorSlot::Invalid()),
+      raw_name_(name),
+      interface_(interface) {}
 
 
 void VariableProxy::BindTo(Variable* var) {
@@ -100,12 +100,12 @@
 Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
                        Expression* value, int pos, IdGen* id_gen)
     : Expression(zone, pos, num_ids(), id_gen),
+      is_uninitialized_(false),
+      store_mode_(STANDARD_STORE),
       op_(op),
       target_(target),
       value_(value),
-      binary_operation_(NULL),
-      is_uninitialized_(false),
-      store_mode_(STANDARD_STORE) {}
+      binary_operation_(NULL) {}
 
 
 Token::Value Assignment::binary_op() const {
@@ -434,7 +434,7 @@
 
 
 bool BinaryOperation::ResultOverwriteAllowed() const {
-  switch (op_) {
+  switch (op()) {
     case Token::COMMA:
     case Token::OR:
     case Token::AND:
diff --git a/src/ast.h b/src/ast.h
index a3fb962..3b36b8a 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -391,22 +391,22 @@
  protected:
   Expression(Zone* zone, int pos, int num_ids_needed_by_subclass, IdGen* id_gen)
       : AstNode(pos),
-        is_parenthesized_(false),
-        is_multi_parenthesized_(false),
-        bounds_(Bounds::Unbounded(zone)),
         base_id_(
-            id_gen->ReserveIdRange(num_ids_needed_by_subclass + num_ids())) {}
+            id_gen->ReserveIdRange(num_ids_needed_by_subclass + num_ids())),
+        bounds_(Bounds::Unbounded(zone)),
+        is_parenthesized_(false),
+        is_multi_parenthesized_(false) {}
   void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
 
   static int num_ids() { return 2; }
   int base_id() const { return base_id_; }
 
  private:
+  const int base_id_;
+  Bounds bounds_;
   byte to_boolean_types_;
   bool is_parenthesized_ : 1;
   bool is_multi_parenthesized_ : 1;
-  Bounds bounds_;
-  const int base_id_;
 };
 
 
@@ -511,21 +511,15 @@
   virtual bool IsInlineable() const;
 
  protected:
-  Declaration(Zone* zone,
-              VariableProxy* proxy,
-              VariableMode mode,
-              Scope* scope,
+  Declaration(Zone* zone, VariableProxy* proxy, VariableMode mode, Scope* scope,
               int pos)
-      : AstNode(pos),
-        proxy_(proxy),
-        mode_(mode),
-        scope_(scope) {
+      : AstNode(pos), mode_(mode), proxy_(proxy), scope_(scope) {
     DCHECK(IsDeclaredVariableMode(mode));
   }
 
  private:
-  VariableProxy* proxy_;
   VariableMode mode_;
+  VariableProxy* proxy_;
 
   // Nested scope from which the declaration originated.
   Scope* scope_;
@@ -1711,15 +1705,15 @@
   VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
                 Interface* interface, int position, IdGen* id_gen);
 
+  bool is_this_ : 1;
+  bool is_assigned_ : 1;
+  bool is_resolved_ : 1;
+  FeedbackVectorSlot variable_feedback_slot_;
   union {
     const AstRawString* raw_name_;  // if !is_resolved_
     Variable* var_;                 // if is_resolved_
   };
   Interface* interface_;
-  FeedbackVectorSlot variable_feedback_slot_;
-  bool is_this_ : 1;
-  bool is_assigned_ : 1;
-  bool is_resolved_ : 1;
 };
 
 
@@ -1773,25 +1767,24 @@
  protected:
   Property(Zone* zone, Expression* obj, Expression* key, int pos, IdGen* id_gen)
       : Expression(zone, pos, num_ids(), id_gen),
-        obj_(obj),
-        key_(key),
-        property_feedback_slot_(FeedbackVectorSlot::Invalid()),
         is_for_call_(false),
         is_uninitialized_(false),
-        is_string_access_(false) {}
+        is_string_access_(false),
+        property_feedback_slot_(FeedbackVectorSlot::Invalid()),
+        obj_(obj),
+        key_(key) {}
 
   static int num_ids() { return 2; }
   int base_id() const { return Expression::base_id() + Expression::num_ids(); }
 
  private:
-  Expression* obj_;
-  Expression* key_;
-  FeedbackVectorSlot property_feedback_slot_;
-
-  SmallMapList receiver_types_;
   bool is_for_call_ : 1;
   bool is_uninitialized_ : 1;
   bool is_string_access_ : 1;
+  FeedbackVectorSlot property_feedback_slot_;
+  Expression* obj_;
+  Expression* key_;
+  SmallMapList receiver_types_;
 };
 
 
@@ -1870,9 +1863,9 @@
   Call(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
        int pos, IdGen* id_gen)
       : Expression(zone, pos, num_ids(), id_gen),
+        call_feedback_slot_(FeedbackVectorSlot::Invalid()),
         expression_(expression),
-        arguments_(arguments),
-        call_feedback_slot_(FeedbackVectorSlot::Invalid()) {
+        arguments_(arguments) {
     if (expression->IsProperty()) {
       expression->AsProperty()->mark_for_call();
     }
@@ -1882,12 +1875,12 @@
   int base_id() const { return Expression::base_id() + Expression::num_ids(); }
 
  private:
+  FeedbackVectorSlot call_feedback_slot_;
   Expression* expression_;
   ZoneList<Expression*>* arguments_;
   Handle<JSFunction> target_;
   Handle<Cell> cell_;
   Handle<AllocationSite> allocation_site_;
-  FeedbackVectorSlot call_feedback_slot_;
 };
 
 
@@ -2035,7 +2028,7 @@
 
   virtual bool ResultOverwriteAllowed() const OVERRIDE;
 
-  Token::Value op() const { return op_; }
+  Token::Value op() const { return static_cast<Token::Value>(op_); }
   Expression* left() const { return left_; }
   Expression* right() const { return right_; }
   Handle<AllocationSite> allocation_site() const { return allocation_site_; }
@@ -2050,8 +2043,14 @@
   TypeFeedbackId BinaryOperationFeedbackId() const {
     return TypeFeedbackId(base_id() + 1);
   }
-  Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
-  void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
+  Maybe<int> fixed_right_arg() const {
+    return has_fixed_right_arg_ ? Maybe<int>(fixed_right_arg_value_)
+                                : Maybe<int>();
+  }
+  void set_fixed_right_arg(Maybe<int> arg) {
+    has_fixed_right_arg_ = arg.has_value;
+    if (arg.has_value) fixed_right_arg_value_ = arg.value;
+  }
 
   virtual void RecordToBooleanTypeFeedback(
       TypeFeedbackOracle* oracle) OVERRIDE;
@@ -2060,7 +2059,7 @@
   BinaryOperation(Zone* zone, Token::Value op, Expression* left,
                   Expression* right, int pos, IdGen* id_gen)
       : Expression(zone, pos, num_ids(), id_gen),
-        op_(op),
+        op_(static_cast<byte>(op)),
         left_(left),
         right_(right) {
     DCHECK(Token::IsBinaryOp(op));
@@ -2070,14 +2069,14 @@
   int base_id() const { return Expression::base_id() + Expression::num_ids(); }
 
  private:
-  Token::Value op_;
+  const byte op_;  // actually Token::Value
+  // TODO(rossberg): the fixed arg should probably be represented as a Constant
+  // type for the RHS. Currently it's actually a Maybe<int>.
+  bool has_fixed_right_arg_;
+  int fixed_right_arg_value_;
   Expression* left_;
   Expression* right_;
   Handle<AllocationSite> allocation_site_;
-
-  // TODO(rossberg): the fixed arg should probably be represented as a Constant
-  // type for the RHS.
-  Maybe<int> fixed_right_arg_;
 };
 
 
@@ -2271,14 +2270,14 @@
   }
 
  private:
-  Token::Value op_;
-  Expression* target_;
-  Expression* value_;
-  BinaryOperation* binary_operation_;
   bool is_uninitialized_ : 1;
   IcCheckType key_type_ : 1;
   KeyedAccessStoreMode store_mode_ : 5;  // Windows treats as signed,
                                          // must have extra bit.
+  Token::Value op_;
+  Expression* target_;
+  Expression* value_;
+  BinaryOperation* binary_operation_;
   SmallMapList receiver_types_;
 };
 
diff --git a/src/base/bits.cc b/src/base/bits.cc
index 6daee53..2818b93 100644
--- a/src/base/bits.cc
+++ b/src/base/bits.cc
@@ -20,6 +20,18 @@
   return value + 1;
 }
 
+
+int32_t SignedMulHigh32(int32_t lhs, int32_t rhs) {
+  int64_t const value = static_cast<int64_t>(lhs) * static_cast<int64_t>(rhs);
+  return bit_cast<int32_t, uint32_t>(bit_cast<uint64_t>(value) >> 32u);
+}
+
+
+int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc) {
+  return bit_cast<int32_t>(bit_cast<uint32_t>(acc) +
+                           bit_cast<uint32_t>(SignedMulHigh32(lhs, rhs)));
+}
+
 }  // namespace bits
 }  // namespace base
 }  // namespace v8
diff --git a/src/base/bits.h b/src/base/bits.h
index d681ba8..5e0e248 100644
--- a/src/base/bits.h
+++ b/src/base/bits.h
@@ -187,6 +187,18 @@
 #endif
 }
 
+
+// SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and
+// |rhs|, extracts the most significant 32 bits of the result, and returns
+// those.
+int32_t SignedMulHigh32(int32_t lhs, int32_t rhs);
+
+
+// SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values
+// |lhs| and |rhs|, extracts the most significant 32 bits of the result, and
+// adds the accumulate value |acc|.
+int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc);
+
 }  // namespace bits
 }  // namespace base
 }  // namespace v8
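
A quick host-side sketch of the semantics promised by the two declarations above (the helpers here are local stand-ins, not calls into src/base):

#include <cstdint>
#include <cstdio>

// Mirrors SignedMulHigh32: the most significant 32 bits of the 64-bit product.
static int32_t MulHigh32(int32_t lhs, int32_t rhs) {
  return static_cast<int32_t>((static_cast<int64_t>(lhs) * rhs) >> 32);
}

// Mirrors SignedMulHighAndAdd32: the high product bits plus the accumulate
// value, with the addition done in unsigned arithmetic so overflow wraps.
static int32_t MulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc) {
  return static_cast<int32_t>(static_cast<uint32_t>(acc) +
                              static_cast<uint32_t>(MulHigh32(lhs, rhs)));
}

int main() {
  std::printf("%d\n", MulHigh32(0x40000000, 4));           // 1: the product 2^32 needs the high word
  std::printf("%d\n", MulHighAndAdd32(0x40000000, 4, 41)); // 42
  return 0;
}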
diff --git a/src/compiler.cc b/src/compiler.cc
index ce3badb..244ed8b 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -165,6 +165,14 @@
   if (!script_.is_null() && script_->type()->value() == Script::TYPE_NATIVE) {
     MarkAsNative();
   }
+  // Compiling for the snapshot typically results in different code than
+  // compiling later on. This means that code recompiled with deoptimization
+  // support won't be "equivalent" (as defined by SharedFunctionInfo::
+  // EnableDeoptimizationSupport), so it will replace the old code and all
+  // its type feedback. To avoid this, always compile functions in the snapshot
+  // with deoptimization support.
+  if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
+
   if (isolate_->debug()->is_active()) MarkAsDebug();
   if (FLAG_context_specialization) MarkAsContextSpecializing();
   if (FLAG_turbo_inlining) MarkAsInliningEnabled();
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index f278326..6f8859b 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -193,7 +193,7 @@
       break;
     }
     case kArchJmp:
-      __ b(code_->GetLabel(i.InputBlock(0)));
+      __ b(code_->GetLabel(i.InputRpo(0)));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchNop:
@@ -485,8 +485,10 @@
 
   // Emit a branch. The true and false targets are always the last two inputs
   // to the instruction.
-  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
-  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  BasicBlock::RpoNumber tblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
+  BasicBlock::RpoNumber fblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
   bool fallthru = IsNextInAssemblyOrder(fblock);
   Label* tlabel = code()->GetLabel(tblock);
   Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index da229d8..c4a7d0e 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -46,10 +46,54 @@
 
   Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
 
+  Operand InputOperand2_32(int index) {
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        return InputOperand32(index);
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
+      case kMode_Operand2_R_ROR_I:
+        return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
+      case kMode_MRI:
+      case kMode_MRR:
+        break;
+    }
+    UNREACHABLE();
+    return Operand(-1);
+  }
+
+  Operand InputOperand2_64(int index) {
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        return InputOperand64(index);
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
+      case kMode_Operand2_R_ROR_I:
+        return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
+      case kMode_MRI:
+      case kMode_MRR:
+        break;
+    }
+    UNREACHABLE();
+    return Operand(-1);
+  }
+
   MemOperand MemoryOperand(int* first_index) {
     const int index = *first_index;
     switch (AddressingModeField::decode(instr_->opcode())) {
       case kMode_None:
+      case kMode_Operand2_R_LSL_I:
+      case kMode_Operand2_R_LSR_I:
+      case kMode_Operand2_R_ASR_I:
+      case kMode_Operand2_R_ROR_I:
         break;
       case kMode_MRI:
         *first_index += 2;
@@ -164,7 +208,7 @@
       break;
     }
     case kArchJmp:
-      __ B(code_->GetLabel(i.InputBlock(0)));
+      __ B(code_->GetLabel(i.InputRpo(0)));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -179,27 +223,28 @@
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
     case kArm64Add:
-      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Add32:
       if (FlagsModeField::decode(opcode) != kFlags_none) {
         __ Adds(i.OutputRegister32(), i.InputRegister32(0),
-                i.InputOperand32(1));
+                i.InputOperand2_32(1));
       } else {
-        __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+        __ Add(i.OutputRegister32(), i.InputRegister32(0),
+               i.InputOperand2_32(1));
       }
       break;
     case kArm64And:
-      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64And32:
-      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Bic:
-      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Bic32:
-      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Bic(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Mul:
       __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -285,38 +330,39 @@
       __ Neg(i.OutputRegister32(), i.InputOperand32(0));
       break;
     case kArm64Or:
-      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Or32:
-      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Orn:
-      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Orn(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Orn32:
-      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Orn(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Eor:
-      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Eor32:
-      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Eon:
-      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Eon(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Eon32:
-      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      __ Eon(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand2_32(1));
       break;
     case kArm64Sub:
-      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
     case kArm64Sub32:
       if (FlagsModeField::decode(opcode) != kFlags_none) {
         __ Subs(i.OutputRegister32(), i.InputRegister32(0),
-                i.InputOperand32(1));
+                i.InputOperand2_32(1));
       } else {
-        __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+        __ Sub(i.OutputRegister32(), i.InputRegister32(0),
+               i.InputOperand2_32(1));
       }
       break;
     case kArm64Lsl:
@@ -526,8 +572,10 @@
 
   // Emit a branch. The true and false targets are always the last two inputs
   // to the instruction.
-  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
-  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  BasicBlock::RpoNumber tblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
+  BasicBlock::RpoNumber fblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
   bool fallthru = IsNextInAssemblyOrder(fblock);
   Label* tlabel = code()->GetLabel(tblock);
   Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index 0f0d1d2..4d19290 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -113,9 +113,13 @@
 // I = immediate (handle, external, int32)
 // MRI = [register + immediate]
 // MRR = [register + register]
-#define TARGET_ADDRESSING_MODE_LIST(V) \
-  V(MRI) /* [%r0 + K] */               \
-  V(MRR) /* [%r0 + %r1] */
+#define TARGET_ADDRESSING_MODE_LIST(V)  \
+  V(MRI)              /* [%r0 + K] */   \
+  V(MRR)              /* [%r0 + %r1] */ \
+  V(Operand2_R_LSL_I) /* %r0 LSL K */   \
+  V(Operand2_R_LSR_I) /* %r0 LSR K */   \
+  V(Operand2_R_ASR_I) /* %r0 ASR K */   \
+  V(Operand2_R_ROR_I) /* %r0 ROR K */
 
 }  // namespace internal
 }  // namespace compiler
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 4786007..893c005 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -113,6 +113,51 @@
 }
 
 
+template <typename Matcher>
+static bool TryMatchShift(InstructionSelector* selector, Node* node,
+                          InstructionCode* opcode, IrOpcode::Value shift_opcode,
+                          ImmediateMode imm_mode,
+                          AddressingMode addressing_mode) {
+  if (node->opcode() != shift_opcode) return false;
+  Arm64OperandGenerator g(selector);
+  Matcher m(node);
+  if (g.CanBeImmediate(m.right().node(), imm_mode)) {
+    *opcode |= AddressingModeField::encode(addressing_mode);
+    return true;
+  }
+  return false;
+}
+
+
+static bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
+                             InstructionCode* opcode, bool try_ror) {
+  return TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Shl, kShift32Imm,
+                                          kMode_Operand2_R_LSL_I) ||
+         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Shr, kShift32Imm,
+                                          kMode_Operand2_R_LSR_I) ||
+         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Sar, kShift32Imm,
+                                          kMode_Operand2_R_ASR_I) ||
+         (try_ror && TryMatchShift<Int32BinopMatcher>(
+                         selector, node, opcode, IrOpcode::kWord32Ror,
+                         kShift32Imm, kMode_Operand2_R_ROR_I)) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Shl, kShift64Imm,
+                                          kMode_Operand2_R_LSL_I) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Shr, kShift64Imm,
+                                          kMode_Operand2_R_LSR_I) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Sar, kShift64Imm,
+                                          kMode_Operand2_R_ASR_I) ||
+         (try_ror && TryMatchShift<Int64BinopMatcher>(
+                         selector, node, opcode, IrOpcode::kWord64Ror,
+                         kShift64Imm, kMode_Operand2_R_ROR_I));
+}
+
+
 // Shared routine for multiple binary operations.
 template <typename Matcher>
 static void VisitBinop(InstructionSelector* selector, Node* node,
@@ -124,9 +169,32 @@
   size_t input_count = 0;
   InstructionOperand* outputs[2];
   size_t output_count = 0;
+  bool try_ror_operand = true;
 
-  inputs[input_count++] = g.UseRegister(m.left().node());
-  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+  if (m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() || m.IsInt64Sub()) {
+    try_ror_operand = false;
+  }
+
+  if (g.CanBeImmediate(m.right().node(), operand_mode)) {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseImmediate(m.right().node());
+  } else if (TryMatchAnyShift(selector, m.right().node(), &opcode,
+                              try_ror_operand)) {
+    Matcher m_shift(m.right().node());
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m_shift.left().node());
+    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+  } else if (m.HasProperty(Operator::kCommutative) &&
+             TryMatchAnyShift(selector, m.left().node(), &opcode,
+                              try_ror_operand)) {
+    Matcher m_shift(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+    inputs[input_count++] = g.UseRegister(m_shift.left().node());
+    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+  }
 
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 318b7e9..92d3b05 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -6,14 +6,9 @@
 #define V8_COMPILER_CODE_GENERATOR_IMPL_H_
 
 #include "src/compiler/code-generator.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/generic-graph.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/linkage.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
 #include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
 
 namespace v8 {
 namespace internal {
@@ -61,12 +56,12 @@
   }
 
   Label* InputLabel(int index) {
-    return gen_->code()->GetLabel(InputBlock(index));
+    return gen_->code()->GetLabel(InputRpo(index));
   }
 
-  BasicBlock* InputBlock(int index) {
-    int block_id = InputInt32(index);
-    return gen_->schedule()->GetBlockById(BasicBlock::Id::FromInt(block_id));
+  BasicBlock::RpoNumber InputRpo(int index) {
+    int rpo_number = InputInt32(index);
+    return BasicBlock::RpoNumber::FromInt(rpo_number);
   }
 
   Register OutputRegister(int index = 0) {
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index fc62119..bb0ef1b 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -14,7 +14,7 @@
 
 CodeGenerator::CodeGenerator(InstructionSequence* code)
     : code_(code),
-      current_block_(NULL),
+      current_block_(BasicBlock::RpoNumber::Invalid()),
       current_source_position_(SourcePosition::Invalid()),
       masm_(code->zone()->isolate(), NULL, 0),
       resolver_(this),
@@ -103,11 +103,12 @@
   if (instr->IsBlockStart()) {
     // Bind a label for a block start and handle parallel moves.
     BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
-    current_block_ = block_start->block();
+    current_block_ = block_start->rpo_number();
     if (FLAG_code_comments) {
       // TODO(titzer): these code comments are a giant memory leak.
       Vector<char> buffer = Vector<char>::New(32);
-      SNPrintF(buffer, "-- B%d start --", block_start->block()->id().ToInt());
+      // TODO(dcarney): this should not be the rpo number here.
+      SNPrintF(buffer, "-- B%d (rpo) start --", current_block_.ToInt());
       masm()->RecordComment(buffer.start());
     }
     masm()->bind(block_start->label());
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index e740dd3..69d9f63 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -39,9 +39,8 @@
 
   // Checks if {block} will appear directly after {current_block_} when
   // assembling code, in which case, a fall-through can be used.
-  bool IsNextInAssemblyOrder(const BasicBlock* block) const {
-    return block->rpo_number() == (current_block_->rpo_number() + 1) &&
-           block->deferred() == current_block_->deferred();
+  bool IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const {
+    return current_block_.IsNext(block);
   }
 
   // Record a safepoint with the given pointer map.
@@ -119,7 +118,7 @@
   };
 
   InstructionSequence* code_;
-  BasicBlock* current_block_;
+  BasicBlock::RpoNumber current_block_;
   SourcePosition current_source_position_;
   MacroAssembler masm_;
   GapResolver resolver_;
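
A stand-in sketch of the fall-through test that the RpoNumber-based current_block_ enables. RpoNumber and IsNext below are local approximations, assumed (per the old check this replaces) to mean "the other block immediately follows in reverse post-order"; the real helpers live in the scheduler.

#include <cassert>

// Local stand-in for BasicBlock::RpoNumber.
class RpoNumber {
 public:
  explicit RpoNumber(int index) : index_(index) {}
  int ToInt() const { return index_; }
  // Assumed semantics: {other} is the very next block in assembly order.
  bool IsNext(const RpoNumber& other) const { return other.index_ == index_ + 1; }

 private:
  int index_;
};

int main() {
  RpoNumber current(3), next(4), far_away(7);
  assert(current.IsNext(next));      // the branch to the next block can fall through
  assert(!current.IsNext(far_away)); // otherwise an explicit jump is emitted
  return 0;
}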
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 5bd8de7..342ab54 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -17,6 +17,9 @@
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
+#include "src/compiler/register-allocator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
 #include "src/ostreams.h"
 
 namespace v8 {
@@ -389,6 +392,395 @@
   GraphVisualizer(os, &tmp_zone, &ad.graph).Print();
   return os;
 }
+
+
+class GraphC1Visualizer {
+ public:
+  GraphC1Visualizer(std::ostream& os, Zone* zone);  // NOLINT
+
+  void PrintCompilation(const CompilationInfo* info);
+  void PrintSchedule(const char* phase, const Schedule* schedule,
+                     const SourcePositionTable* positions,
+                     const InstructionSequence* instructions);
+  void PrintAllocator(const char* phase, const RegisterAllocator* allocator);
+  Zone* zone() const { return zone_; }
+
+ private:
+  void PrintIndent();
+  void PrintStringProperty(const char* name, const char* value);
+  void PrintLongProperty(const char* name, int64_t value);
+  void PrintIntProperty(const char* name, int value);
+  void PrintBlockProperty(const char* name, BasicBlock::Id block_id);
+  void PrintNodeId(Node* n);
+  void PrintNode(Node* n);
+  void PrintInputs(Node* n);
+  void PrintInputs(InputIter* i, int count, const char* prefix);
+  void PrintType(Node* node);
+
+  void PrintLiveRange(LiveRange* range, const char* type);
+  class Tag FINAL BASE_EMBEDDED {
+   public:
+    Tag(GraphC1Visualizer* visualizer, const char* name) {
+      name_ = name;
+      visualizer_ = visualizer;
+      visualizer->PrintIndent();
+      visualizer_->os_ << "begin_" << name << "\n";
+      visualizer->indent_++;
+    }
+
+    ~Tag() {
+      visualizer_->indent_--;
+      visualizer_->PrintIndent();
+      visualizer_->os_ << "end_" << name_ << "\n";
+      DCHECK(visualizer_->indent_ >= 0);
+    }
+
+   private:
+    GraphC1Visualizer* visualizer_;
+    const char* name_;
+  };
+
+  std::ostream& os_;
+  int indent_;
+  Zone* zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(GraphC1Visualizer);
+};
+
+
+void GraphC1Visualizer::PrintIndent() {
+  for (int i = 0; i < indent_; i++) {
+    os_ << "  ";
+  }
+}
+
+
+GraphC1Visualizer::GraphC1Visualizer(std::ostream& os, Zone* zone)
+    : os_(os), indent_(0), zone_(zone) {}
+
+
+void GraphC1Visualizer::PrintStringProperty(const char* name,
+                                            const char* value) {
+  PrintIndent();
+  os_ << name << " \"" << value << "\"\n";
+}
+
+
+void GraphC1Visualizer::PrintLongProperty(const char* name, int64_t value) {
+  PrintIndent();
+  os_ << name << " " << static_cast<int>(value / 1000) << "\n";
+}
+
+
+void GraphC1Visualizer::PrintBlockProperty(const char* name,
+                                           BasicBlock::Id block_id) {
+  PrintIndent();
+  os_ << name << " \"B" << block_id << "\"\n";
+}
+
+
+void GraphC1Visualizer::PrintIntProperty(const char* name, int value) {
+  PrintIndent();
+  os_ << name << " " << value << "\n";
+}
+
+
+void GraphC1Visualizer::PrintCompilation(const CompilationInfo* info) {
+  Tag tag(this, "compilation");
+  if (info->IsOptimizing()) {
+    Handle<String> name = info->function()->debug_name();
+    PrintStringProperty("name", name->ToCString().get());
+    PrintIndent();
+    os_ << "method \"" << name->ToCString().get() << ":"
+        << info->optimization_id() << "\"\n";
+  } else {
+    CodeStub::Major major_key = info->code_stub()->MajorKey();
+    PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+    PrintStringProperty("method", "stub");
+  }
+  PrintLongProperty("date",
+                    static_cast<int64_t>(base::OS::TimeCurrentMillis()));
+}
+
+
+void GraphC1Visualizer::PrintNodeId(Node* n) { os_ << "n" << n->id(); }
+
+
+void GraphC1Visualizer::PrintNode(Node* n) {
+  PrintNodeId(n);
+  os_ << " " << *n->op() << " ";
+  PrintInputs(n);
+}
+
+
+void GraphC1Visualizer::PrintInputs(InputIter* i, int count,
+                                    const char* prefix) {
+  if (count > 0) {
+    os_ << prefix;
+  }
+  while (count > 0) {
+    os_ << " ";
+    PrintNodeId(**i);
+    ++(*i);
+    count--;
+  }
+}
+
+
+void GraphC1Visualizer::PrintInputs(Node* node) {
+  InputIter i = node->inputs().begin();
+  PrintInputs(&i, OperatorProperties::GetValueInputCount(node->op()), " ");
+  PrintInputs(&i, OperatorProperties::GetContextInputCount(node->op()),
+              " Ctx:");
+  PrintInputs(&i, OperatorProperties::GetFrameStateInputCount(node->op()),
+              " FS:");
+  PrintInputs(&i, OperatorProperties::GetEffectInputCount(node->op()), " Eff:");
+  PrintInputs(&i, OperatorProperties::GetControlInputCount(node->op()),
+              " Ctrl:");
+}
+
+
+void GraphC1Visualizer::PrintType(Node* node) {
+  Bounds bounds = NodeProperties::GetBounds(node);
+  os_ << " type:";
+  bounds.upper->PrintTo(os_);
+  os_ << "..";
+  bounds.lower->PrintTo(os_);
+}
+
+
+void GraphC1Visualizer::PrintSchedule(const char* phase,
+                                      const Schedule* schedule,
+                                      const SourcePositionTable* positions,
+                                      const InstructionSequence* instructions) {
+  Tag tag(this, "cfg");
+  PrintStringProperty("name", phase);
+  const BasicBlockVector* rpo = schedule->rpo_order();
+  for (size_t i = 0; i < rpo->size(); i++) {
+    BasicBlock* current = (*rpo)[i];
+    Tag block_tag(this, "block");
+    PrintBlockProperty("name", current->id());
+    PrintIntProperty("from_bci", -1);
+    PrintIntProperty("to_bci", -1);
+
+    PrintIndent();
+    os_ << "predecessors";
+    for (BasicBlock::Predecessors::iterator j = current->predecessors_begin();
+         j != current->predecessors_end(); ++j) {
+      os_ << " \"B" << (*j)->id() << "\"";
+    }
+    os_ << "\n";
+
+    PrintIndent();
+    os_ << "successors";
+    for (BasicBlock::Successors::iterator j = current->successors_begin();
+         j != current->successors_end(); ++j) {
+      os_ << " \"B" << (*j)->id() << "\"";
+    }
+    os_ << "\n";
+
+    PrintIndent();
+    os_ << "xhandlers\n";
+
+    PrintIndent();
+    os_ << "flags\n";
+
+    if (current->dominator() != NULL) {
+      PrintBlockProperty("dominator", current->dominator()->id());
+    }
+
+    PrintIntProperty("loop_depth", current->loop_depth());
+
+    if (instructions->code_start(current) >= 0) {
+      int first_index = instructions->first_instruction_index(current);
+      int last_index = instructions->last_instruction_index(current);
+      PrintIntProperty("first_lir_id", LifetimePosition::FromInstructionIndex(
+                                           first_index).Value());
+      PrintIntProperty("last_lir_id", LifetimePosition::FromInstructionIndex(
+                                          last_index).Value());
+    }
+
+    {
+      Tag states_tag(this, "states");
+      Tag locals_tag(this, "locals");
+      int total = 0;
+      for (BasicBlock::const_iterator i = current->begin(); i != current->end();
+           ++i) {
+        if ((*i)->opcode() == IrOpcode::kPhi) total++;
+      }
+      PrintIntProperty("size", total);
+      PrintStringProperty("method", "None");
+      int index = 0;
+      for (BasicBlock::const_iterator i = current->begin(); i != current->end();
+           ++i) {
+        if ((*i)->opcode() != IrOpcode::kPhi) continue;
+        PrintIndent();
+        os_ << index << " ";
+        PrintNodeId(*i);
+        os_ << " [";
+        PrintInputs(*i);
+        os_ << "]\n";
+        index++;
+      }
+    }
+
+    {
+      Tag HIR_tag(this, "HIR");
+      for (BasicBlock::const_iterator i = current->begin(); i != current->end();
+           ++i) {
+        Node* node = *i;
+        if (node->opcode() == IrOpcode::kPhi) continue;
+        int uses = node->UseCount();
+        PrintIndent();
+        os_ << "0 " << uses << " ";
+        PrintNode(node);
+        if (FLAG_trace_turbo_types) {
+          os_ << " ";
+          PrintType(node);
+        }
+        if (positions != NULL) {
+          SourcePosition position = positions->GetSourcePosition(node);
+          if (!position.IsUnknown()) {
+            DCHECK(!position.IsInvalid());
+            os_ << " pos:" << position.raw();
+          }
+        }
+        os_ << " <|@\n";
+      }
+
+      BasicBlock::Control control = current->control();
+      if (control != BasicBlock::kNone) {
+        PrintIndent();
+        os_ << "0 0 ";
+        if (current->control_input() != NULL) {
+          PrintNode(current->control_input());
+        } else {
+          os_ << -1 - current->id().ToInt() << " Goto";
+        }
+        os_ << " ->";
+        for (BasicBlock::Successors::iterator j = current->successors_begin();
+             j != current->successors_end(); ++j) {
+          os_ << " B" << (*j)->id();
+        }
+        if (FLAG_trace_turbo_types && current->control_input() != NULL) {
+          os_ << " ";
+          PrintType(current->control_input());
+        }
+        os_ << " <|@\n";
+      }
+    }
+
+    if (instructions != NULL) {
+      Tag LIR_tag(this, "LIR");
+      for (int j = instructions->first_instruction_index(current);
+           j <= instructions->last_instruction_index(current); j++) {
+        PrintIndent();
+        os_ << j << " " << *instructions->InstructionAt(j) << " <|@\n";
+      }
+    }
+  }
+}
+
+
+void GraphC1Visualizer::PrintAllocator(const char* phase,
+                                       const RegisterAllocator* allocator) {
+  Tag tag(this, "intervals");
+  PrintStringProperty("name", phase);
+
+  const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
+  for (int i = 0; i < fixed_d->length(); ++i) {
+    PrintLiveRange(fixed_d->at(i), "fixed");
+  }
+
+  const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
+  for (int i = 0; i < fixed->length(); ++i) {
+    PrintLiveRange(fixed->at(i), "fixed");
+  }
+
+  const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
+  for (int i = 0; i < live_ranges->length(); ++i) {
+    PrintLiveRange(live_ranges->at(i), "object");
+  }
+}
+
+
+void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
+  if (range != NULL && !range->IsEmpty()) {
+    PrintIndent();
+    os_ << range->id() << " " << type;
+    if (range->HasRegisterAssigned()) {
+      InstructionOperand* op = range->CreateAssignedOperand(zone());
+      int assigned_reg = op->index();
+      if (op->IsDoubleRegister()) {
+        os_ << " \"" << DoubleRegister::AllocationIndexToString(assigned_reg)
+            << "\"";
+      } else {
+        DCHECK(op->IsRegister());
+        os_ << " \"" << Register::AllocationIndexToString(assigned_reg) << "\"";
+      }
+    } else if (range->IsSpilled()) {
+      InstructionOperand* op = range->TopLevel()->GetSpillOperand();
+      if (op->IsDoubleStackSlot()) {
+        os_ << " \"double_stack:" << op->index() << "\"";
+      } else if (op->IsStackSlot()) {
+        os_ << " \"stack:" << op->index() << "\"";
+      } else {
+        DCHECK(op->IsConstant());
+        os_ << " \"const(nostack):" << op->index() << "\"";
+      }
+    }
+    int parent_index = -1;
+    if (range->IsChild()) {
+      parent_index = range->parent()->id();
+    } else {
+      parent_index = range->id();
+    }
+    InstructionOperand* op = range->FirstHint();
+    int hint_index = -1;
+    if (op != NULL && op->IsUnallocated()) {
+      hint_index = UnallocatedOperand::cast(op)->virtual_register();
+    }
+    os_ << " " << parent_index << " " << hint_index;
+    UseInterval* cur_interval = range->first_interval();
+    while (cur_interval != NULL && range->Covers(cur_interval->start())) {
+      os_ << " [" << cur_interval->start().Value() << ", "
+          << cur_interval->end().Value() << "[";
+      cur_interval = cur_interval->next();
+    }
+
+    UsePosition* current_pos = range->first_pos();
+    while (current_pos != NULL) {
+      if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
+        os_ << " " << current_pos->pos().Value() << " M";
+      }
+      current_pos = current_pos->next();
+    }
+
+    os_ << " \"\"\n";
+  }
+}
+
+
+std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac) {
+  Zone tmp_zone(ac.info_->isolate());
+  GraphC1Visualizer(os, &tmp_zone).PrintCompilation(ac.info_);
+  return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const AsC1V& ac) {
+  Zone tmp_zone(ac.schedule_->zone()->isolate());
+  GraphC1Visualizer(os, &tmp_zone)
+      .PrintSchedule(ac.phase_, ac.schedule_, ac.positions_, ac.instructions_);
+  return os;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac) {
+  Zone tmp_zone(ac.allocator_->code()->zone()->isolate());
+  GraphC1Visualizer(os, &tmp_zone).PrintAllocator(ac.phase_, ac.allocator_);
+  return os;
+}
 }
 }
 }  // namespace v8::internal::compiler
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
index ccb2289..7212a4f 100644
--- a/src/compiler/graph-visualizer.h
+++ b/src/compiler/graph-visualizer.h
@@ -9,9 +9,17 @@
 
 namespace v8 {
 namespace internal {
+
+class CompilationInfo;
+
 namespace compiler {
 
 class Graph;
+class InstructionSequence;
+class RegisterAllocator;
+class Schedule;
+class SourcePositionTable;
+
 
 struct AsDOT {
   explicit AsDOT(const Graph& g) : graph(g) {}
@@ -28,6 +36,39 @@
 
 std::ostream& operator<<(std::ostream& os, const AsJSON& ad);
 
+struct AsC1VCompilation {
+  explicit AsC1VCompilation(const CompilationInfo* info) : info_(info) {}
+  const CompilationInfo* info_;
+};
+
+
+struct AsC1V {
+  AsC1V(const char* phase, const Schedule* schedule,
+        const SourcePositionTable* positions = NULL,
+        const InstructionSequence* instructions = NULL)
+      : schedule_(schedule),
+        instructions_(instructions),
+        positions_(positions),
+        phase_(phase) {}
+  const Schedule* schedule_;
+  const InstructionSequence* instructions_;
+  const SourcePositionTable* positions_;
+  const char* phase_;
+};
+
+struct AsC1VAllocator {
+  explicit AsC1VAllocator(const char* phase,
+                          const RegisterAllocator* allocator = NULL)
+      : phase_(phase), allocator_(allocator) {}
+  const char* phase_;
+  const RegisterAllocator* allocator_;
+};
+
+std::ostream& operator<<(std::ostream& os, const AsDOT& ad);
+std::ostream& operator<<(std::ostream& os, const AsC1VCompilation& ac);
+std::ostream& operator<<(std::ostream& os, const AsC1V& ac);
+std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac);
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 454a02f..8543fd7 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -195,7 +195,7 @@
       break;
     }
     case kArchJmp:
-      __ jmp(code()->GetLabel(i.InputBlock(0)));
+      __ jmp(code()->GetLabel(i.InputRpo(0)));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -470,8 +470,10 @@
 
   // Emit a branch. The true and false targets are always the last two inputs
   // to the instruction.
-  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
-  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  BasicBlock::RpoNumber tblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
+  BasicBlock::RpoNumber fblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
   bool fallthru = IsNextInAssemblyOrder(fblock);
   Label* tlabel = code()->GetLabel(tblock);
   Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index d3b1e00..d6edb45 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -260,7 +260,7 @@
 
   // TODO(turbofan): match complex addressing modes.
   if (g.CanBeImmediate(right)) {
-    inputs[input_count++] = g.Use(left);
+    inputs[input_count++] = g.UseRegister(left);
     inputs[input_count++] = g.UseImmediate(right);
   } else {
     if (node->op()->HasProperty(Operator::kCommutative) &&
@@ -315,7 +315,7 @@
   IA32OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+    Emit(kIA32Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
   } else {
     VisitBinop(this, node, kIA32Xor);
   }
@@ -330,7 +330,7 @@
   Node* right = node->InputAt(1);
 
   if (g.CanBeImmediate(right)) {
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
     Int32BinopMatcher m(node);
@@ -340,7 +340,7 @@
         right = mright.left().node();
       }
     }
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, ecx));
   }
 }
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 1c8c175..b545959 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -55,9 +55,9 @@
   }
 
   InstructionOperand* Use(Node* node) {
-    return Use(node,
-               new (zone()) UnallocatedOperand(
-                   UnallocatedOperand::ANY, UnallocatedOperand::USED_AT_START));
+    return Use(
+        node, new (zone()) UnallocatedOperand(
+                  UnallocatedOperand::NONE, UnallocatedOperand::USED_AT_START));
   }
 
   InstructionOperand* UseRegister(Node* node) {
@@ -69,7 +69,7 @@
   // Use register or operand for the node. If a register is chosen, it won't
   // alias any temporary or output registers.
   InstructionOperand* UseUnique(Node* node) {
-    return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::ANY));
+    return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::NONE));
   }
 
   // Use a unique register for the node that does not alias any temporary or
@@ -130,7 +130,7 @@
 
   InstructionOperand* Label(BasicBlock* block) {
     // TODO(bmeurer): We misuse ImmediateOperand here.
-    return TempImmediate(block->id().ToInt());
+    return TempImmediate(block->rpo_number());
   }
 
  protected:
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 73c787d..0cfd950 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -55,11 +55,11 @@
   // Schedule the selected instructions.
   for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
     BasicBlock* block = *i;
-    size_t end = block->code_end();
-    size_t start = block->code_start();
+    size_t end = sequence()->code_end(block);
+    size_t start = sequence()->code_start(block);
     sequence()->StartBlock(block);
     while (start-- > end) {
-      sequence()->AddInstruction(instructions_[start], block);
+      sequence()->AddInstruction(instructions_[start]);
     }
     sequence()->EndBlock(block);
   }
@@ -141,8 +141,7 @@
 
 
 bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
-  return block->rpo_number() == (current_block_->rpo_number() + 1) &&
-         block->deferred() == current_block_->deferred();
+  return current_block_->GetRpoNumber().IsNext(block->GetRpoNumber());
 }
 
 
@@ -384,9 +383,8 @@
   }
 
   // We're done with the block.
-  // TODO(bmeurer): We should not mutate the schedule.
-  block->set_code_start(static_cast<int>(instructions_.size()));
-  block->set_code_end(current_block_end);
+  sequence()->set_code_start(block, static_cast<int>(instructions_.size()));
+  sequence()->set_code_end(block, current_block_end);
 
   current_block_ = NULL;
 }
@@ -1020,6 +1018,12 @@
 
 void InstructionSelector::VisitCall(Node* node) { UNIMPLEMENTED(); }
 
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  UNIMPLEMENTED();
+}
+
 #endif  // !V8_TURBOFAN_BACKEND
 
 }  // namespace compiler
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 4a1cd1a..b02f637 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -6,6 +6,7 @@
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
 
 namespace v8 {
 namespace internal {
@@ -291,7 +292,7 @@
       os << " " << *instr.InputAt(i);
     }
   }
-  return os << "\n";
+  return os;
 }
 
 
@@ -321,6 +322,7 @@
     : zone_(schedule->zone()),
       node_count_(graph->NodeCount()),
       node_map_(zone()->NewArray<int>(node_count_)),
+      block_data_(static_cast<int>(schedule->BasicBlockCount()), zone()),
       linkage_(linkage),
       schedule_(schedule),
       constants_(ConstantMap::key_compare(),
@@ -346,32 +348,36 @@
 }
 
 
-Label* InstructionSequence::GetLabel(BasicBlock* block) {
-  return GetBlockStart(block)->label();
+Label* InstructionSequence::GetLabel(BasicBlock::RpoNumber rpo) {
+  return GetBlockStart(rpo)->label();
 }
 
 
-BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
-  return BlockStartInstruction::cast(InstructionAt(block->code_start()));
+BlockStartInstruction* InstructionSequence::GetBlockStart(
+    BasicBlock::RpoNumber rpo) {
+  BlockStartInstruction* block_start =
+      BlockStartInstruction::cast(InstructionAt(code_start(rpo)));
+  DCHECK_EQ(rpo.ToInt(), block_start->rpo_number().ToInt());
+  return block_start;
 }
 
 
 void InstructionSequence::StartBlock(BasicBlock* block) {
-  block->set_code_start(static_cast<int>(instructions_.size()));
+  set_code_start(block, static_cast<int>(instructions_.size()));
   BlockStartInstruction* block_start =
       BlockStartInstruction::New(zone(), block);
-  AddInstruction(block_start, block);
+  AddInstruction(block_start);
 }
 
 
 void InstructionSequence::EndBlock(BasicBlock* block) {
   int end = static_cast<int>(instructions_.size());
-  DCHECK(block->code_start() >= 0 && block->code_start() < end);
-  block->set_code_end(end);
+  DCHECK(code_start(block) >= 0 && code_start(block) < end);
+  set_code_end(block, end);
 }
 
 
-int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
+int InstructionSequence::AddInstruction(Instruction* instr) {
   // TODO(titzer): the order of these gaps is a holdover from Lithium.
   GapInstruction* gap = GapInstruction::New(zone());
   if (instr->IsControl()) instructions_.push_back(gap);
@@ -395,7 +401,8 @@
     DCHECK_LE(0, instruction_index);
     Instruction* instruction = InstructionAt(instruction_index--);
     if (instruction->IsBlockStart()) {
-      return BlockStartInstruction::cast(instruction)->block();
+      return schedule()->rpo_order()->at(
+          BlockStartInstruction::cast(instruction)->rpo_number().ToSize());
     }
   }
 }
@@ -537,8 +544,8 @@
       os << " loop blocks: [" << block->rpo_number() << ", "
          << block->loop_end() << ")";
     }
-    os << "  instructions: [" << block->code_start() << ", "
-       << block->code_end() << ")\n  predecessors:";
+    os << "  instructions: [" << code.code_start(block) << ", "
+       << code.code_end(block) << ")\n  predecessors:";
 
     for (BasicBlock::Predecessors::iterator iter = block->predecessors_begin();
          iter != block->predecessors_end(); ++iter) {
@@ -560,11 +567,11 @@
     }
 
     ScopedVector<char> buf(32);
-    for (int j = block->first_instruction_index();
-         j <= block->last_instruction_index(); j++) {
+    for (int j = code.first_instruction_index(block);
+         j <= code.last_instruction_index(block); j++) {
       // TODO(svenpanne) Add some basic formatting to our streams.
       SNPrintF(buf, "%5d", j);
-      os << "   " << buf.start() << ": " << *code.InstructionAt(j);
+      os << "   " << buf.start() << ": " << *code.InstructionAt(j) << "\n";
     }
 
     os << "  " << block->control();
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 2bb2b72..72ea043 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -12,10 +12,10 @@
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/frame.h"
-#include "src/compiler/graph.h"
 #include "src/compiler/instruction-codes.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/schedule.h"
+#include "src/compiler/source-position.h"
 // TODO(titzer): don't include the macro-assembler?
 #include "src/macro-assembler.h"
 #include "src/zone-allocator.h"
@@ -596,8 +596,8 @@
 // TODO(titzer): move code_start and code_end from BasicBlock to here.
 class BlockStartInstruction FINAL : public GapInstruction {
  public:
-  BasicBlock* block() const { return block_; }
   Label* label() { return &label_; }
+  BasicBlock::RpoNumber rpo_number() const { return rpo_number_; }
 
   static BlockStartInstruction* New(Zone* zone, BasicBlock* block) {
     void* buffer = zone->New(sizeof(BlockStartInstruction));
@@ -611,9 +611,10 @@
 
  private:
   explicit BlockStartInstruction(BasicBlock* block)
-      : GapInstruction(kBlockStartInstruction), block_(block) {}
+      : GapInstruction(kBlockStartInstruction),
+        rpo_number_(block->GetRpoNumber()) {}
 
-  BasicBlock* block_;
+  BasicBlock::RpoNumber rpo_number_;
   Label label_;
 };
 
@@ -781,8 +782,6 @@
     return block->loop_header();
   }
 
-  int GetLoopEnd(BasicBlock* block) const { return block->loop_end(); }
-
   BasicBlock* GetBasicBlock(int instruction_index);
 
   int GetVirtualRegister(const Node* node);
@@ -797,8 +796,8 @@
 
   void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
 
-  Label* GetLabel(BasicBlock* block);
-  BlockStartInstruction* GetBlockStart(BasicBlock* block);
+  Label* GetLabel(BasicBlock::RpoNumber rpo);
+  BlockStartInstruction* GetBlockStart(BasicBlock::RpoNumber rpo);
 
   typedef InstructionDeque::const_iterator const_iterator;
   const_iterator begin() const { return instructions_.begin(); }
@@ -821,10 +820,32 @@
   const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
   Zone* zone() const { return zone_; }
 
-  // Used by the code generator while adding instructions.
-  int AddInstruction(Instruction* instr, BasicBlock* block);
+  // Used by the instruction selector while adding instructions.
+  int AddInstruction(Instruction* instr);
   void StartBlock(BasicBlock* block);
   void EndBlock(BasicBlock* block);
+  void set_code_start(BasicBlock* block, int start) {
+    return GetBlockData(block->GetRpoNumber()).set_code_start(start);
+  }
+  void set_code_end(BasicBlock* block, int end) {
+    return GetBlockData(block->GetRpoNumber()).set_code_end(end);
+  }
+  // TODO(dcarney): use RpoNumber for all of the below.
+  int code_start(BasicBlock::RpoNumber rpo_number) const {
+    return GetBlockData(rpo_number).code_start();
+  }
+  int code_start(BasicBlock* block) const {
+    return GetBlockData(block->GetRpoNumber()).code_start();
+  }
+  int code_end(BasicBlock* block) const {
+    return GetBlockData(block->GetRpoNumber()).code_end();
+  }
+  int first_instruction_index(BasicBlock* block) const {
+    return GetBlockData(block->GetRpoNumber()).first_instruction_index();
+  }
+  int last_instruction_index(BasicBlock* block) const {
+    return GetBlockData(block->GetRpoNumber()).last_instruction_index();
+  }
 
   int AddConstant(Node* node, Constant constant) {
     int virtual_register = GetVirtualRegister(node);
@@ -868,14 +889,51 @@
   int GetFrameStateDescriptorCount();
 
  private:
+  class BlockData {
+   public:
+    BlockData() : code_start_(-1), code_end_(-1) {}
+    // Instruction indexes (used by the register allocator).
+    int first_instruction_index() const {
+      DCHECK(code_start_ >= 0);
+      DCHECK(code_end_ > 0);
+      DCHECK(code_end_ >= code_start_);
+      return code_start_;
+    }
+    int last_instruction_index() const {
+      DCHECK(code_start_ >= 0);
+      DCHECK(code_end_ > 0);
+      DCHECK(code_end_ >= code_start_);
+      return code_end_ - 1;
+    }
+
+    int32_t code_start() const { return code_start_; }
+    void set_code_start(int32_t start) { code_start_ = start; }
+
+    int32_t code_end() const { return code_end_; }
+    void set_code_end(int32_t end) { code_end_ = end; }
+
+   private:
+    int32_t code_start_;  // start index of arch-specific code.
+    int32_t code_end_;    // end index of arch-specific code.
+  };
+
+  const BlockData& GetBlockData(BasicBlock::RpoNumber rpo_number) const {
+    return block_data_[rpo_number.ToSize()];
+  }
+  BlockData& GetBlockData(BasicBlock::RpoNumber rpo_number) {
+    return block_data_[rpo_number.ToSize()];
+  }
+
   friend std::ostream& operator<<(std::ostream& os,
                                   const InstructionSequence& code);
 
   typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
+  typedef ZoneVector<BlockData> BlockDataVector;
 
   Zone* zone_;
   int node_count_;
   int* node_map_;
+  BlockDataVector block_data_;
   Linkage* linkage_;
   Schedule* schedule_;
   ConstantMap constants_;
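
The instruction.h changes above move the per-block code_start/code_end bookkeeping out of BasicBlock and into a BlockData table that InstructionSequence owns and indexes by RPO number. A minimal standalone sketch of that pattern, with purely illustrative names (this is not the real V8 class, just the shape of the change):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // A side table of per-block instruction ranges, keyed by RPO index and
    // owned by the sequence that holds the instructions, not by the blocks.
    class ToyInstructionSequence {
     public:
      explicit ToyInstructionSequence(size_t block_count)
          : block_data_(block_count) {}

      void StartBlock(size_t rpo) { block_data_[rpo].code_start = next_index_; }
      void EndBlock(size_t rpo) { block_data_[rpo].code_end = next_index_; }
      int AddInstruction() { return next_index_++; }

      int first_instruction_index(size_t rpo) const {
        assert(block_data_[rpo].code_start >= 0);
        return block_data_[rpo].code_start;
      }
      int last_instruction_index(size_t rpo) const {
        assert(block_data_[rpo].code_end >= block_data_[rpo].code_start);
        return block_data_[rpo].code_end - 1;
      }

     private:
      struct BlockData {
        int32_t code_start = -1;  // index of the block's first instruction.
        int32_t code_end = -1;    // index one past the block's last instruction.
      };
      std::vector<BlockData> block_data_;
      int next_index_ = 0;
    };

    int main() {
      ToyInstructionSequence seq(2);
      seq.StartBlock(0);
      seq.AddInstruction();
      seq.AddInstruction();
      seq.EndBlock(0);
      assert(seq.first_instruction_index(0) == 0);
      assert(seq.last_instruction_index(0) == 1);
      return 0;
    }

The register-allocator.cc and cctest changes later in this patch are the mechanical fallout: every block->first_instruction_index() call becomes code()->first_instruction_index(block).
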
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 00e4011..5e868e9 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -241,7 +241,7 @@
   Node* context = jsgraph->graph()->NewNode(
       simplified.LoadField(AccessBuilder::ForJSFunctionContext()),
       NodeProperties::GetValueInput(call, 0),
-      NodeProperties::GetEffectInput(call));
+      NodeProperties::GetEffectInput(call), control);
 
   // Context is last argument.
   int inlinee_context_index = static_cast<int>(total_parameters()) - 1;
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 19d1b02..8dcca27 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -154,7 +154,7 @@
       break;
     }
     case kArchJmp:
-      __ Branch(code_->GetLabel(i.InputBlock(0)));
+      __ Branch(code_->GetLabel(i.InputRpo(0)));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -393,8 +393,10 @@
 
   // Emit a branch. The true and false targets are always the last two inputs
   // to the instruction.
-  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
-  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  BasicBlock::RpoNumber tblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
+  BasicBlock::RpoNumber fblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
   bool fallthru = IsNextInAssemblyOrder(fblock);
   Label* tlabel = code()->GetLabel(tblock);
   Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
diff --git a/src/compiler/node-aux-data-inl.h b/src/compiler/node-aux-data-inl.h
index 79f1abf..d8db4b9 100644
--- a/src/compiler/node-aux-data-inl.h
+++ b/src/compiler/node-aux-data-inl.h
@@ -29,7 +29,7 @@
 
 
 template <class T>
-T NodeAuxData<T>::Get(Node* node) {
+T NodeAuxData<T>::Get(Node* node) const {
   int id = node->id();
   if (id >= static_cast<int>(aux_data_.size())) {
     return T();
diff --git a/src/compiler/node-aux-data.h b/src/compiler/node-aux-data.h
index 7acce33..a08dc58 100644
--- a/src/compiler/node-aux-data.h
+++ b/src/compiler/node-aux-data.h
@@ -21,7 +21,7 @@
   inline explicit NodeAuxData(Zone* zone);
 
   inline void Set(Node* node, const T& data);
-  inline T Get(Node* node);
+  inline T Get(Node* node) const;
 
  private:
   ZoneVector<T> aux_data_;
diff --git a/src/compiler/operator-properties-inl.h b/src/compiler/operator-properties-inl.h
index 1c9ef00..2ddaeca 100644
--- a/src/compiler/operator-properties-inl.h
+++ b/src/compiler/operator-properties-inl.h
@@ -118,6 +118,7 @@
     case IrOpcode::kEffectPhi:
     case IrOpcode::kLoad:
     case IrOpcode::kLoadElement:
+    case IrOpcode::kLoadField:
       return 1;
 #define OPCODE_CASE(x) case IrOpcode::k##x:
       CONTROL_OP_LIST(OPCODE_CASE)
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 7da2fb5..81a0ec5 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -90,13 +90,28 @@
 }
 
 
+void Pipeline::PrintCompilationStart() {
+  std::ofstream turbo_cfg_stream;
+  OpenTurboCfgFile(&turbo_cfg_stream);
+  turbo_cfg_stream << AsC1VCompilation(info());
+}
+
+
+void Pipeline::OpenTurboCfgFile(std::ofstream* stream) {
+  char buffer[512];
+  Vector<char> filename(buffer, sizeof(buffer));
+  isolate()->GetTurboCfgFileName(filename);
+  stream->open(filename.start(), std::fstream::out | std::fstream::app);
+}
+
+
 void Pipeline::VerifyAndPrintGraph(Graph* graph, const char* phase) {
   if (FLAG_trace_turbo) {
     char buffer[256];
     Vector<char> filename(buffer, sizeof(buffer));
+    SmartArrayPointer<char> functionname;
     if (!info_->shared_info().is_null()) {
-      SmartArrayPointer<char> functionname =
-          info_->shared_info()->DebugName()->ToCString();
+      functionname = info_->shared_info()->DebugName()->ToCString();
       if (strlen(functionname.get()) > 0) {
         SNPrintF(filename, "turbo-%s-%s", functionname.get(), phase);
       } else {
@@ -132,6 +147,24 @@
 }
 
 
+void Pipeline::PrintScheduleAndInstructions(
+    const char* phase, const Schedule* schedule,
+    const SourcePositionTable* positions,
+    const InstructionSequence* instructions) {
+  std::ofstream turbo_cfg_stream;
+  OpenTurboCfgFile(&turbo_cfg_stream);
+  turbo_cfg_stream << AsC1V(phase, schedule, positions, instructions);
+}
+
+
+void Pipeline::PrintAllocator(const char* phase,
+                              const RegisterAllocator* allocator) {
+  std::ofstream turbo_cfg_stream;
+  OpenTurboCfgFile(&turbo_cfg_stream);
+  turbo_cfg_stream << AsC1VAllocator(phase, allocator);
+}
+
+
 class AstGraphBuilderWithPositions : public AstGraphBuilder {
  public:
   explicit AstGraphBuilderWithPositions(CompilationInfo* info, JSGraph* jsgraph,
@@ -188,6 +221,7 @@
        << "Begin compiling method "
        << info()->function()->debug_name()->ToCString().get()
        << " using Turbofan" << std::endl;
+    PrintCompilationStart();
   }
 
   // Build the graph.
@@ -405,6 +439,8 @@
     OFStream os(stdout);
     os << "----- Instruction sequence before register allocation -----\n"
        << sequence;
+    PrintScheduleAndInstructions("CodeGen", schedule, source_positions,
+                                 &sequence);
   }
 
   // Allocate registers.
@@ -419,6 +455,9 @@
       linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
       return Handle<Code>::null();
     }
+    if (FLAG_trace_turbo) {
+      PrintAllocator("CodeGen", &allocator);
+    }
   }
 
   if (FLAG_trace_turbo) {
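
The Print* helpers added above share one pattern: open the per-isolate cfg file in append mode and stream a single C1Visualizer record into it, so successive compilation phases accumulate in one file. A rough sketch of that append-a-record idiom (the file name and record text are placeholders, not the real AsC1V output):

    #include <fstream>
    #include <string>

    // Append one trace record; truncation happens once, elsewhere, when the
    // isolate is initialized.
    void AppendCfgRecord(const std::string& filename, const std::string& record) {
      std::ofstream stream(filename, std::ios::out | std::ios::app);
      if (!stream.is_open()) return;  // tracing is best-effort.
      stream << record;
    }

Reopening the stream per record keeps the helpers stateless, which appears to be why each Print* call above opens the file itself.
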
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index 9f8241a..f47c3c1 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_PIPELINE_H_
 #define V8_COMPILER_PIPELINE_H_
 
+#include <fstream>  // NOLINT(readability/streams)
+
 #include "src/v8.h"
 
 #include "src/compiler.h"
@@ -18,9 +20,11 @@
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
 class Graph;
+class InstructionSequence;
+class Linkage;
+class RegisterAllocator;
 class Schedule;
 class SourcePositionTable;
-class Linkage;
 
 class Pipeline {
  public:
@@ -48,6 +52,12 @@
   Zone* zone() { return info_->zone(); }
 
   Schedule* ComputeSchedule(Graph* graph);
+  void OpenTurboCfgFile(std::ofstream* stream);
+  void PrintCompilationStart();
+  void PrintScheduleAndInstructions(const char* phase, const Schedule* schedule,
+                                    const SourcePositionTable* positions,
+                                    const InstructionSequence* instructions);
+  void PrintAllocator(const char* phase, const RegisterAllocator* allocator);
   void VerifyAndPrintGraph(Graph* graph, const char* phase);
   Handle<Code> GenerateCode(Linkage* linkage, Graph* graph, Schedule* schedule,
                             SourcePositionTable* source_positions);
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index fbaf4fa..c0ee02f 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -559,10 +559,11 @@
                                             BitVector* live_out) {
   // Add an interval that includes the entire block to the live range for
   // each live_out value.
-  LifetimePosition start =
-      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
-  LifetimePosition end = LifetimePosition::FromInstructionIndex(
-                             block->last_instruction_index()).NextInstruction();
+  LifetimePosition start = LifetimePosition::FromInstructionIndex(
+      code()->first_instruction_index(block));
+  LifetimePosition end =
+      LifetimePosition::FromInstructionIndex(
+          code()->last_instruction_index(block)).NextInstruction();
   BitVector::Iterator iterator(live_out);
   while (!iterator.Done()) {
     int operand_index = iterator.Current();
@@ -651,7 +652,7 @@
 
 
 GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) {
-  int last_instruction = block->last_instruction_index();
+  int last_instruction = code()->last_instruction_index(block);
   return code()->GapAt(last_instruction - 1);
 }
 
@@ -729,8 +730,8 @@
 
 
 void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
-  int start = block->first_instruction_index();
-  int end = block->last_instruction_index();
+  int start = code()->first_instruction_index(block);
+  int end = code()->last_instruction_index(block);
   DCHECK_NE(-1, start);
   for (int i = start; i <= end; ++i) {
     if (code()->IsGapAt(i)) {
@@ -752,7 +753,7 @@
 
 void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
     BasicBlock* block) {
-  int end = block->last_instruction_index();
+  int end = code()->last_instruction_index(block);
   Instruction* last_instruction = InstructionAt(end);
   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
     InstructionOperand* output_operand = last_instruction->OutputAt(i);
@@ -773,7 +774,7 @@
       for (BasicBlock::Successors::iterator succ = block->successors_begin();
            succ != block->successors_end(); ++succ) {
         DCHECK((*succ)->PredecessorCount() == 1);
-        int gap_index = (*succ)->first_instruction_index() + 1;
+        int gap_index = code()->first_instruction_index(*succ) + 1;
         DCHECK(code()->IsGapAt(gap_index));
 
         // Create an unconstrained operand for the same virtual register
@@ -790,7 +791,7 @@
       for (BasicBlock::Successors::iterator succ = block->successors_begin();
            succ != block->successors_end(); ++succ) {
         DCHECK((*succ)->PredecessorCount() == 1);
-        int gap_index = (*succ)->first_instruction_index() + 1;
+        int gap_index = code()->first_instruction_index(*succ) + 1;
         range->SetSpillStartIndex(gap_index);
 
         // This move to spill operand is not a real use. Liveness analysis
@@ -937,12 +938,12 @@
 
 void RegisterAllocator::ProcessInstructions(BasicBlock* block,
                                             BitVector* live) {
-  int block_start = block->first_instruction_index();
+  int block_start = code()->first_instruction_index(block);
 
   LifetimePosition block_start_position =
       LifetimePosition::FromInstructionIndex(block_start);
 
-  for (int index = block->last_instruction_index(); index >= block_start;
+  for (int index = code()->last_instruction_index(block); index >= block_start;
        index--) {
     LifetimePosition curr_position =
         LifetimePosition::FromInstructionIndex(index);
@@ -1081,19 +1082,21 @@
       BasicBlock* cur_block = block->PredecessorAt(j);
       // The gap move must be added without any special processing as in
       // the AddConstraintsGapMove.
-      code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
+      code()->AddGapMove(code()->last_instruction_index(cur_block) - 1, operand,
                          phi_operand);
 
-      Instruction* branch = InstructionAt(cur_block->last_instruction_index());
+      Instruction* branch =
+          InstructionAt(code()->last_instruction_index(cur_block));
       DCHECK(!branch->HasPointerMap());
       USE(branch);
     }
 
     LiveRange* live_range = LiveRangeFor(phi_vreg);
-    BlockStartInstruction* block_start = code()->GetBlockStart(block);
+    BlockStartInstruction* block_start =
+        code()->GetBlockStart(block->GetRpoNumber());
     block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
         ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
-    live_range->SetSpillStartIndex(block->first_instruction_index());
+    live_range->SetSpillStartIndex(code()->first_instruction_index(block));
 
     // We use the phi-ness of some nodes in some later heuristics.
     live_range->set_is_phi(true);
@@ -1147,10 +1150,10 @@
 
 void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block,
                                            BasicBlock* pred) {
-  LifetimePosition pred_end =
-      LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
-  LifetimePosition cur_start =
-      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LifetimePosition pred_end = LifetimePosition::FromInstructionIndex(
+      code()->last_instruction_index(pred));
+  LifetimePosition cur_start = LifetimePosition::FromInstructionIndex(
+      code()->first_instruction_index(block));
   LiveRange* pred_cover = NULL;
   LiveRange* cur_cover = NULL;
   LiveRange* cur_range = range;
@@ -1175,12 +1178,13 @@
     if (!pred_op->Equals(cur_op)) {
       GapInstruction* gap = NULL;
       if (block->PredecessorCount() == 1) {
-        gap = code()->GapAt(block->first_instruction_index());
+        gap = code()->GapAt(code()->first_instruction_index(block));
       } else {
         DCHECK(pred->SuccessorCount() == 1);
         gap = GetLastGap(pred);
 
-        Instruction* branch = InstructionAt(pred->last_instruction_index());
+        Instruction* branch =
+            InstructionAt(code()->last_instruction_index(pred));
         DCHECK(!branch->HasPointerMap());
         USE(branch);
       }
@@ -1320,7 +1324,7 @@
       DCHECK(hint != NULL);
 
       LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
-          block->first_instruction_index());
+          code()->first_instruction_index(block));
       Define(block_start, phi_operand, hint);
     }
 
@@ -1333,9 +1337,9 @@
       // for each value live on entry to the header.
       BitVector::Iterator iterator(live);
       LifetimePosition start = LifetimePosition::FromInstructionIndex(
-          block->first_instruction_index());
+          code()->first_instruction_index(block));
       int end_index =
-          code()->BlockAt(block->loop_end())->last_instruction_index();
+          code()->last_instruction_index(code()->BlockAt(block->loop_end()));
       LifetimePosition end =
           LifetimePosition::FromInstructionIndex(end_index).NextInstruction();
       while (!iterator.Done()) {
@@ -1967,7 +1971,7 @@
     // If possible try to move spilling position backwards to loop header.
     // This will reduce number of memory moves on the back edge.
     LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
-        loop_header->first_instruction_index());
+        code()->first_instruction_index(loop_header));
 
     if (range->Covers(loop_start)) {
       if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
@@ -2105,7 +2109,7 @@
   if (block == end_block && !end_block->IsLoopHeader()) return end;
 
   return LifetimePosition::FromInstructionIndex(
-      block->first_instruction_index());
+      code()->first_instruction_index(block));
 }
 
 
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index d641b4b..ac6b440 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -18,9 +18,6 @@
       loop_header_(NULL),
       loop_depth_(0),
       loop_end_(-1),
-      code_start_(-1),
-      code_end_(-1),
-      deferred_(false),
       control_(kNone),
       control_input_(NULL),
       nodes_(zone),
@@ -86,14 +83,6 @@
 void BasicBlock::set_loop_end(int32_t loop_end) { loop_end_ = loop_end; }
 
 
-void BasicBlock::set_code_start(int32_t code_start) {
-  code_start_ = code_start;
-}
-
-
-void BasicBlock::set_code_end(int32_t code_end) { code_end_ = code_end; }
-
-
 void BasicBlock::set_loop_header(BasicBlock* loop_header) {
   loop_header_ = loop_header;
 }
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index 7def37f..acfc447 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -50,24 +50,28 @@
     size_t index_;
   };
 
+  class RpoNumber FINAL {
+   public:
+    int ToInt() const { return static_cast<int>(index_); }
+    size_t ToSize() const { return index_; }
+    static RpoNumber FromInt(int index) {
+      return RpoNumber(static_cast<size_t>(index));
+    }
+    static RpoNumber Invalid() { return RpoNumber(static_cast<size_t>(-1)); }
+
+    bool IsNext(const RpoNumber other) const {
+      return other.index_ == this->index_ + 1;
+    }
+
+   private:
+    explicit RpoNumber(size_t index) : index_(index) {}
+    size_t index_;
+  };
+
   BasicBlock(Zone* zone, Id id);
 
   Id id() const { return id_; }
 
-  // Instruction indexes (used by the register allocator).
-  int first_instruction_index() {
-    DCHECK(code_start_ >= 0);
-    DCHECK(code_end_ > 0);
-    DCHECK(code_end_ >= code_start_);
-    return code_start_;
-  }
-  int last_instruction_index() {
-    DCHECK(code_start_ >= 0);
-    DCHECK(code_end_ > 0);
-    DCHECK(code_end_ >= code_start_);
-    return code_end_ - 1;
-  }
-
   // Predecessors and successors.
   typedef ZoneVector<BasicBlock*> Predecessors;
   Predecessors::iterator predecessors_begin() { return predecessors_.begin(); }
@@ -126,17 +130,10 @@
   int32_t loop_end() const { return loop_end_; }
   void set_loop_end(int32_t loop_end);
 
+  RpoNumber GetRpoNumber() const { return RpoNumber::FromInt(rpo_number_); }
   int32_t rpo_number() const { return rpo_number_; }
   void set_rpo_number(int32_t rpo_number);
 
-  int32_t code_start() const { return code_start_; }
-  void set_code_start(int32_t start);
-
-  int32_t code_end() const { return code_end_; }
-  void set_code_end(int32_t end);
-
-  bool deferred() const { return deferred_; }
-
   // Loop membership helpers.
   inline bool IsLoopHeader() const { return loop_end_ >= 0; }
   bool LoopContains(BasicBlock* block) const;
@@ -150,10 +147,7 @@
                              // enclosing loop header.
   int32_t loop_depth_;       // loop nesting, 0 is top-level
   int32_t loop_end_;         // end of the loop, if this block is a loop header.
-  int32_t code_start_;       // start index of arch-specific code.
-  int32_t code_end_;         // end index of arch-specific code.
-  bool deferred_;            // {true} if this block is considered the slow
-                             // path.
+
   Control control_;          // Control at the end of the block.
   Node* control_input_;      // Input value for control.
   NodeVector nodes_;         // nodes of this block in forward order.
@@ -218,11 +212,12 @@
   void AddSuccessor(BasicBlock* block, BasicBlock* succ);
 
   BasicBlockVector* rpo_order() { return &rpo_order_; }
+  const BasicBlockVector* rpo_order() const { return &rpo_order_; }
 
   BasicBlock* start() { return start_; }
   BasicBlock* end() { return end_; }
 
-  Zone* zone() { return zone_; }
+  Zone* zone() const { return zone_; }
 
  private:
   friend class Scheduler;
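
BasicBlock::RpoNumber, introduced above, is a small strongly typed index: there is no implicit conversion from int, so an RPO number cannot be confused with a virtual register or instruction index at a call site. A compact sketch of the same idea (illustrative names only, not the real classes):

    #include <cstddef>

    class RpoIndex {
     public:
      static RpoIndex FromInt(int index) {
        return RpoIndex(static_cast<size_t>(index));
      }
      int ToInt() const { return static_cast<int>(index_); }
      size_t ToSize() const { return index_; }

     private:
      explicit RpoIndex(size_t index) : index_(index) {}
      size_t index_;  // position of the block in reverse postorder.
    };

    // Stand-in for a lookup like InstructionSequence::code_start(rpo): the
    // parameter type documents and enforces which kind of number to pass.
    int LookupByRpo(const int* table, RpoIndex rpo) { return table[rpo.ToSize()]; }

    int main() {
      const int table[] = {0, 4, 9};
      int start = LookupByRpo(table, RpoIndex::FromInt(1));
      // LookupByRpo(table, 1) would not compile: no implicit int -> RpoIndex.
      return start == 4 ? 0 : 1;
    }
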
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index fe0280b..7a0f324 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -947,7 +947,6 @@
   node->set_op(machine()->Load(access.machine_type));
   Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
   node->InsertInput(zone(), 1, offset);
-  node->AppendInput(zone(), graph()->start());
 }
 
 
diff --git a/src/compiler/source-position.cc b/src/compiler/source-position.cc
index 1178390..9503010 100644
--- a/src/compiler/source-position.cc
+++ b/src/compiler/source-position.cc
@@ -46,7 +46,7 @@
 }
 
 
-SourcePosition SourcePositionTable::GetSourcePosition(Node* node) {
+SourcePosition SourcePositionTable::GetSourcePosition(Node* node) const {
   return table_.Get(node);
 }
 
diff --git a/src/compiler/source-position.h b/src/compiler/source-position.h
index 778f067..390a17d 100644
--- a/src/compiler/source-position.h
+++ b/src/compiler/source-position.h
@@ -79,7 +79,7 @@
   void AddDecorator();
   void RemoveDecorator();
 
-  SourcePosition GetSourcePosition(Node* node);
+  SourcePosition GetSourcePosition(Node* node) const;
 
  private:
   class Decorator;
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index fa9f511..4411876 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -233,7 +233,7 @@
       break;
     }
     case kArchJmp:
-      __ jmp(code_->GetLabel(i.InputBlock(0)));
+      __ jmp(code_->GetLabel(i.InputRpo(0)));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -583,8 +583,10 @@
 
   // Emit a branch. The true and false targets are always the last two inputs
   // to the instruction.
-  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
-  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
+  BasicBlock::RpoNumber tblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
+  BasicBlock::RpoNumber fblock =
+      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
   bool fallthru = IsNextInAssemblyOrder(fblock);
   Label* tlabel = code()->GetLabel(tblock);
   Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 9540753..6a3d9c0 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -241,7 +241,7 @@
 
   // TODO(turbofan): match complex addressing modes.
   if (g.CanBeImmediate(right)) {
-    inputs[input_count++] = g.Use(left);
+    inputs[input_count++] = g.UseRegister(left);
     inputs[input_count++] = g.UseImmediate(right);
   } else {
     if (node->op()->HasProperty(Operator::kCommutative) &&
@@ -305,7 +305,7 @@
   X64OperandGenerator g(this);
   Uint32BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kX64Not32, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+    Emit(kX64Not32, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
   } else {
     VisitBinop(this, node, kX64Xor32);
   }
@@ -316,7 +316,7 @@
   X64OperandGenerator g(this);
   Uint64BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kX64Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+    Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
   } else {
     VisitBinop(this, node, kX64Xor);
   }
@@ -332,7 +332,7 @@
   Node* right = node->InputAt(1);
 
   if (g.CanBeImmediate(right)) {
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
     Int32BinopMatcher m(node);
@@ -342,7 +342,7 @@
         right = mright.left().node();
       }
     }
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, rcx));
   }
 }
@@ -357,7 +357,7 @@
   Node* right = node->InputAt(1);
 
   if (g.CanBeImmediate(right)) {
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
     Int64BinopMatcher m(node);
@@ -367,7 +367,7 @@
         right = mright.left().node();
       }
     }
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, rcx));
   }
 }
@@ -472,7 +472,7 @@
   X64OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
     VisitBinop(this, node, kX64Sub32);
   }
@@ -483,7 +483,7 @@
   X64OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kX64Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+    Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
     VisitBinop(this, node, kX64Sub);
   }
diff --git a/src/d8.cc b/src/d8.cc
index d1929b0..a36ec5e 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -1625,6 +1625,7 @@
   StartupDataHandler startup_data(options.natives_blob, options.snapshot_blob);
 #endif
   SetFlagsFromString("--trace-hydrogen-file=hydrogen.cfg");
+  SetFlagsFromString("--trace-turbo-cfg-file=turbo.cfg");
   SetFlagsFromString("--redirect-code-traces-to=code.asm");
   ShellArrayBufferAllocator array_buffer_allocator;
   MockArrayBufferAllocator mock_arraybuffer_allocator;
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index ded5bcf..83b0abf 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -333,6 +333,8 @@
 // Flags for TurboFan.
 DEFINE_STRING(turbo_filter, "~", "optimization filter for TurboFan compiler")
 DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
+DEFINE_STRING(trace_turbo_cfg_file, NULL,
+              "trace turbo cfg graph (for C1 visualizer) to a given file name")
 DEFINE_BOOL(trace_turbo_types, true, "trace generated TurboFan types")
 DEFINE_BOOL(trace_turbo_scheduler, false, "trace generated TurboFan scheduler")
 DEFINE_BOOL(turbo_asm, false, "enable TurboFan for asm.js code")
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 1d56be7..62f6d62 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1005,7 +1005,7 @@
   int size = 0;
   switch (identity()) {
     case OLD_POINTER_SPACE:
-      size = (112 + constant_pool_delta) * kPointerSize * KB;
+      size = (128 + constant_pool_delta) * kPointerSize * KB;
       break;
     case OLD_DATA_SPACE:
       size = 192 * KB;
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index 1c621fe..ca9e00a 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -122,7 +122,7 @@
       modifier = GetTransitionMarkModifier(
           KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
     }
-    PrintF(" (%c->%c%s)", TransitionMarkFromState(old_state),
+    PrintF(" (%c->%c%s) ", TransitionMarkFromState(old_state),
            TransitionMarkFromState(new_state), modifier);
 #ifdef OBJECT_PRINT
     OFStream os(stdout);
diff --git a/src/isolate.cc b/src/isolate.cc
index e8e1d66..2874c16 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -4,6 +4,8 @@
 
 #include <stdlib.h>
 
+#include <fstream>  // NOLINT(readability/streams)
+
 #include "src/v8.h"
 
 #include "src/ast.h"
@@ -1951,6 +1953,16 @@
 
   runtime_profiler_ = new RuntimeProfiler(this);
 
+  if (FLAG_trace_turbo) {
+    // Erase the file.
+    char buffer[512];
+    Vector<char> filename(buffer, sizeof(buffer));
+    GetTurboCfgFileName(filename);
+    std::ofstream turbo_cfg_stream(filename.start(),
+                                   std::fstream::out | std::fstream::trunc);
+  }
+
+
   // If we are deserializing, log non-function code objects and compiled
   // functions found in the snapshot.
   if (!create_heap_objects &&
@@ -2364,6 +2376,16 @@
 }
 
 
+void Isolate::GetTurboCfgFileName(Vector<char> filename) {
+  if (FLAG_trace_turbo_cfg_file == NULL) {
+    SNPrintF(filename, "turbo-%d-%d.cfg", base::OS::GetCurrentProcessId(),
+             id());
+  } else {
+    StrNCpy(filename, FLAG_trace_turbo_cfg_file, filename.length());
+  }
+}
+
+
 bool StackLimitCheck::JsHasOverflowed() const {
   StackGuard* stack_guard = isolate_->stack_guard();
 #ifdef USE_SIMULATOR
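
GetTurboCfgFileName above picks the trace file: an explicit --trace-turbo-cfg-file value wins, otherwise the name falls back to a per-process, per-isolate default, and the isolate truncates that file once at initialization so later phases can append to it. A small sketch of the fallback logic in isolation (the flag value, pid and isolate id are passed in as plain parameters here, unlike the real accessors):

    #include <cstddef>
    #include <cstdio>

    void TurboCfgFileName(char* buffer, size_t length, const char* flag_value,
                          int pid, int isolate_id) {
      if (flag_value == nullptr) {
        // Default: one file per process and isolate, e.g. "turbo-1234-1.cfg".
        snprintf(buffer, length, "turbo-%d-%d.cfg", pid, isolate_id);
      } else {
        snprintf(buffer, length, "%s", flag_value);
      }
    }

d8 pins the name to turbo.cfg via the SetFlagsFromString call added earlier in this patch, so shell runs land in a predictable file.
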
diff --git a/src/isolate.h b/src/isolate.h
index f1e6a3e..7fd4a82 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1107,6 +1107,8 @@
 
   static Isolate* NewForTesting() { return new Isolate(false); }
 
+  void GetTurboCfgFileName(Vector<char> buffer);
+
  private:
   explicit Isolate(bool enable_serializer);
 
diff --git a/src/scanner.h b/src/scanner.h
index 7f35e71..d40c626 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -219,7 +219,6 @@
       *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
           unibrow::Utf16::LeadSurrogate(code_unit);
       position_ += kUC16Size;
-      if (position_ >= backing_store_.length()) ExpandBuffer();
       *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
           unibrow::Utf16::TrailSurrogate(code_unit);
       position_ += kUC16Size;
diff --git a/src/v8natives.js b/src/v8natives.js
index 1d2e030..9bb4b83 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -500,67 +500,68 @@
     "set_",
     "hasSetter_"
   ), $Array(
-    "toString", function() {
+    "toString", function PropertyDescriptor_ToString() {
       return "[object PropertyDescriptor]";
     },
-    "setValue", function(value) {
+    "setValue", function PropertyDescriptor_SetValue(value) {
       this.value_ = value;
       this.hasValue_ = true;
     },
-    "getValue", function() {
+    "getValue", function PropertyDescriptor_GetValue() {
       return this.value_;
     },
-    "hasValue", function() {
+    "hasValue", function PropertyDescriptor_HasValue() {
       return this.hasValue_;
     },
-    "setEnumerable", function(enumerable) {
+    "setEnumerable", function PropertyDescriptor_SetEnumerable(enumerable) {
       this.enumerable_ = enumerable;
         this.hasEnumerable_ = true;
     },
-    "isEnumerable", function () {
+    "isEnumerable", function PropertyDescriptor_IsEnumerable() {
       return this.enumerable_;
     },
-    "hasEnumerable", function() {
+    "hasEnumerable", function PropertyDescriptor_HasEnumerable() {
       return this.hasEnumerable_;
     },
-    "setWritable", function(writable) {
+    "setWritable", function PropertyDescriptor_SetWritable(writable) {
       this.writable_ = writable;
       this.hasWritable_ = true;
     },
-    "isWritable", function() {
+    "isWritable", function PropertyDescriptor_IsWritable() {
       return this.writable_;
     },
-    "hasWritable", function() {
+    "hasWritable", function PropertyDescriptor_HasWritable() {
       return this.hasWritable_;
     },
-    "setConfigurable", function(configurable) {
+    "setConfigurable",
+    function PropertyDescriptor_SetConfigurable(configurable) {
       this.configurable_ = configurable;
       this.hasConfigurable_ = true;
     },
-    "hasConfigurable", function() {
+    "hasConfigurable", function PropertyDescriptor_HasConfigurable() {
       return this.hasConfigurable_;
     },
-    "isConfigurable", function() {
+    "isConfigurable", function PropertyDescriptor_IsConfigurable() {
       return this.configurable_;
     },
-    "setGet", function(get) {
+    "setGet", function PropertyDescriptor_SetGetter(get) {
       this.get_ = get;
-        this.hasGetter_ = true;
+      this.hasGetter_ = true;
     },
-    "getGet", function() {
+    "getGet", function PropertyDescriptor_GetGetter() {
       return this.get_;
     },
-    "hasGetter", function() {
+    "hasGetter", function PropertyDescriptor_HasGetter() {
       return this.hasGetter_;
     },
-    "setSet", function(set) {
+    "setSet", function PropertyDescriptor_SetSetter(set) {
       this.set_ = set;
       this.hasSetter_ = true;
     },
-    "getSet", function() {
+    "getSet", function PropertyDescriptor_GetSetter() {
       return this.set_;
     },
-    "hasSetter", function() {
+    "hasSetter", function PropertyDescriptor_HasSetter() {
       return this.hasSetter_;
   }));
 
diff --git a/src/version.cc b/src/version.cc
index e712704..f57158e 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     30
-#define BUILD_NUMBER      9
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      10
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/zone-containers.h b/src/zone-containers.h
index 2ee1780..4998cbf 100644
--- a/src/zone-containers.h
+++ b/src/zone-containers.h
@@ -24,6 +24,12 @@
       : std::vector<T, zone_allocator<T> >(zone_allocator<T>(zone)) {}
 
   // Constructs a new vector and fills it with {size} elements, each
+  // constructed via the default constructor.
+  ZoneVector(int size, Zone* zone)
+      : std::vector<T, zone_allocator<T> >(size, T(), zone_allocator<T>(zone)) {
+  }
+
+  // Constructs a new vector and fills it with {size} elements, each
   // having the value {def}.
   ZoneVector(int size, T def, Zone* zone)
       : std::vector<T, zone_allocator<T> >(size, def, zone_allocator<T>(zone)) {
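
The new two-argument ZoneVector constructor above value-initializes {size} elements, which is what allows block_data_ in instruction.h to be sized to BasicBlockCount() up front instead of being grown with push_back. The plain std::vector equivalent, for reference:

    #include <cassert>
    #include <vector>

    struct BlockData {
      int code_start = -1;
      int code_end = -1;
    };

    int main() {
      // Same shape as ZoneVector(size, zone): N default-constructed elements,
      // immediately indexable by RPO number.
      std::vector<BlockData> block_data(8);
      assert(block_data.size() == 8);
      assert(block_data[3].code_start == -1);
      return 0;
    }
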
diff --git a/test/cctest/compiler/test-instruction.cc b/test/cctest/compiler/test-instruction.cc
index 073d584..d0e4762 100644
--- a/test/cctest/compiler/test-instruction.cc
+++ b/test/cctest/compiler/test-instruction.cc
@@ -81,10 +81,10 @@
     return node;
   }
 
-  int NewInstr(BasicBlock* block) {
+  int NewInstr() {
     InstructionCode opcode = static_cast<InstructionCode>(110);
     TestInstr* instr = TestInstr::New(zone(), opcode);
-    return code->AddInstruction(instr, block);
+    return code->AddInstruction(instr);
   }
 
   UnallocatedOperand* NewUnallocated(int vreg) {
@@ -122,7 +122,7 @@
        i++, index++) {
     BasicBlock* block = *i;
     CHECK_EQ(block, R.code->BlockAt(index));
-    CHECK_EQ(-1, R.code->GetLoopEnd(block));
+    CHECK_EQ(-1, block->loop_end());
   }
 }
 
@@ -142,19 +142,19 @@
   R.allocCode();
 
   R.code->StartBlock(b0);
-  int i0 = R.NewInstr(b0);
-  int i1 = R.NewInstr(b0);
+  int i0 = R.NewInstr();
+  int i1 = R.NewInstr();
   R.code->EndBlock(b0);
   R.code->StartBlock(b1);
-  int i2 = R.NewInstr(b1);
-  int i3 = R.NewInstr(b1);
-  int i4 = R.NewInstr(b1);
-  int i5 = R.NewInstr(b1);
+  int i2 = R.NewInstr();
+  int i3 = R.NewInstr();
+  int i4 = R.NewInstr();
+  int i5 = R.NewInstr();
   R.code->EndBlock(b1);
   R.code->StartBlock(b2);
-  int i6 = R.NewInstr(b2);
-  int i7 = R.NewInstr(b2);
-  int i8 = R.NewInstr(b2);
+  int i6 = R.NewInstr();
+  int i7 = R.NewInstr();
+  int i8 = R.NewInstr();
   R.code->EndBlock(b2);
   R.code->StartBlock(b3);
   R.code->EndBlock(b3);
@@ -171,17 +171,17 @@
   CHECK_EQ(b2, R.code->GetBasicBlock(i7));
   CHECK_EQ(b2, R.code->GetBasicBlock(i8));
 
-  CHECK_EQ(b0, R.code->GetBasicBlock(b0->first_instruction_index()));
-  CHECK_EQ(b0, R.code->GetBasicBlock(b0->last_instruction_index()));
+  CHECK_EQ(b0, R.code->GetBasicBlock(R.code->first_instruction_index(b0)));
+  CHECK_EQ(b0, R.code->GetBasicBlock(R.code->last_instruction_index(b0)));
 
-  CHECK_EQ(b1, R.code->GetBasicBlock(b1->first_instruction_index()));
-  CHECK_EQ(b1, R.code->GetBasicBlock(b1->last_instruction_index()));
+  CHECK_EQ(b1, R.code->GetBasicBlock(R.code->first_instruction_index(b1)));
+  CHECK_EQ(b1, R.code->GetBasicBlock(R.code->last_instruction_index(b1)));
 
-  CHECK_EQ(b2, R.code->GetBasicBlock(b2->first_instruction_index()));
-  CHECK_EQ(b2, R.code->GetBasicBlock(b2->last_instruction_index()));
+  CHECK_EQ(b2, R.code->GetBasicBlock(R.code->first_instruction_index(b2)));
+  CHECK_EQ(b2, R.code->GetBasicBlock(R.code->last_instruction_index(b2)));
 
-  CHECK_EQ(b3, R.code->GetBasicBlock(b3->first_instruction_index()));
-  CHECK_EQ(b3, R.code->GetBasicBlock(b3->last_instruction_index()));
+  CHECK_EQ(b3, R.code->GetBasicBlock(R.code->first_instruction_index(b3)));
+  CHECK_EQ(b3, R.code->GetBasicBlock(R.code->last_instruction_index(b3)));
 }
 
 
@@ -195,8 +195,8 @@
   TestInstr* i0 = TestInstr::New(R.zone(), 100);
   TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
   R.code->StartBlock(b0);
-  R.code->AddInstruction(i0, b0);
-  R.code->AddInstruction(g, b0);
+  R.code->AddInstruction(i0);
+  R.code->AddInstruction(g);
   R.code->EndBlock(b0);
 
   CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
@@ -222,15 +222,15 @@
   TestInstr* i0 = TestInstr::New(R.zone(), 100);
   TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
   R.code->StartBlock(b0);
-  R.code->AddInstruction(i0, b0);
-  R.code->AddInstruction(g, b0);
+  R.code->AddInstruction(i0);
+  R.code->AddInstruction(g);
   R.code->EndBlock(b0);
 
   TestInstr* i1 = TestInstr::New(R.zone(), 102);
   TestInstr* g1 = TestInstr::New(R.zone(), 104)->MarkAsControl();
   R.code->StartBlock(b1);
-  R.code->AddInstruction(i1, b1);
-  R.code->AddInstruction(g1, b1);
+  R.code->AddInstruction(i1);
+  R.code->AddInstruction(g1);
   R.code->EndBlock(b1);
 
   CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
@@ -263,8 +263,8 @@
   TestInstr* i0 = TestInstr::New(R.zone(), 100);
   TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
   R.code->StartBlock(b0);
-  R.code->AddInstruction(i0, b0);
-  R.code->AddInstruction(g, b0);
+  R.code->AddInstruction(i0);
+  R.code->AddInstruction(g);
   R.code->EndBlock(b0);
 
   CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 6b182d0..19d176a 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -18295,7 +18295,7 @@
 
 TEST(Regress2107) {
   const intptr_t MB = 1024 * 1024;
-  const int kIdlePauseInMs = 1000;
+  const int kIdlePauseInMs = 10000;
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   v8::HandleScope scope(env->GetIsolate());
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index ed9563d..8c1a4d9 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+#include <iostream>  // NOLINT(readability/streams)
+
 #include "src/v8.h"
 #include "test/cctest/cctest.h"
 
@@ -34,6 +36,7 @@
 #include "src/factory.h"
 #include "src/ostreams.h"
 
+using namespace v8::base;
 using namespace v8::internal;
 
 
@@ -1495,6 +1498,58 @@
 #undef TEST_SDIV
 
 
+TEST(smmla) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ smmla(r1, r1, r2, r3);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt(), z = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, z, 0);
+    CHECK_EQ(bits::SignedMulHighAndAdd32(x, y, z), r);
+    USE(dummy);
+  }
+}
+
+
+TEST(smmul) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ smmul(r1, r1, r2);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+    CHECK_EQ(bits::SignedMulHigh32(x, y), r);
+    USE(dummy);
+  }
+}
+
+
 TEST(code_relative_offset) {
   // Test extracting the offset of a label from the beginning of the code
   // in a register.
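
The smmla/smmul tests added above drive the new assembler entries, and the test-disasm-arm.cc expectations that follow pin down the exact machine words. Those words can be reproduced from the encoding emitted in assembler-arm.cc (SMMLA is cond 0111 0101 Rd Ra Rm 0001 Rn, and SMMUL is the same with Ra = 0b1111); a small self-checking sketch:

    #include <cassert>
    #include <cstdint>

    uint32_t EncodeSmmla(int rd, int rn, int rm, int ra, uint32_t cond = 0xE) {
      // cond | 0111 0101 | Rd | Ra | Rm | 0001 | Rn
      return (cond << 28) | (0x75u << 20) | (rd << 16) | (ra << 12) | (rm << 8) |
             (0x1u << 4) | rn;
    }

    uint32_t EncodeSmmul(int rd, int rn, int rm, uint32_t cond = 0xE) {
      return EncodeSmmla(rd, rn, rm, /*ra=*/0xF, cond);
    }

    int main() {
      // Matches the COMPARE expectations added to test-disasm-arm.cc below.
      assert(EncodeSmmla(0, 1, 2, 3) == 0xE7503211u);
      assert(EncodeSmmla(10, 9, 8, 7) == 0xE75A7819u);
      assert(EncodeSmmul(0, 1, 2) == 0xE750F211u);
      assert(EncodeSmmul(8, 9, 10) == 0xE758FA19u);
      return 0;
    }
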
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index c1f6ce2..1fabdc2 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -420,6 +420,12 @@
             "e6cf3474       uxtb16 r3, r4, ror #8");
   }
 
+  COMPARE(smmla(r0, r1, r2, r3), "e7503211       smmla r0, r1, r2, r3");
+  COMPARE(smmla(r10, r9, r8, r7), "e75a7819       smmla r10, r9, r8, r7");
+
+  COMPARE(smmul(r0, r1, r2), "e750f211       smmul r0, r1, r2");
+  COMPARE(smmul(r8, r9, r10), "e758fa19       smmul r8, r9, r10");
+
   VERIFY_RUN();
 }
 
diff --git a/test/unittests/base/bits-unittest.cc b/test/unittests/base/bits-unittest.cc
index caedae2..be41007 100644
--- a/test/unittests/base/bits-unittest.cc
+++ b/test/unittests/base/bits-unittest.cc
@@ -199,6 +199,31 @@
   }
 }
 
+
+TEST(Bits, SignedMulHigh32) {
+  EXPECT_EQ(0, SignedMulHigh32(0, 0));
+  TRACED_FORRANGE(int32_t, i, 1, 50) {
+    TRACED_FORRANGE(int32_t, j, 1, i) { EXPECT_EQ(0, SignedMulHigh32(i, j)); }
+  }
+  EXPECT_EQ(-1073741824, SignedMulHigh32(std::numeric_limits<int32_t>::max(),
+                                         std::numeric_limits<int32_t>::min()));
+  EXPECT_EQ(-1073741824, SignedMulHigh32(std::numeric_limits<int32_t>::min(),
+                                         std::numeric_limits<int32_t>::max()));
+  EXPECT_EQ(1, SignedMulHigh32(1024 * 1024 * 1024, 4));
+  EXPECT_EQ(2, SignedMulHigh32(8 * 1024, 1024 * 1024));
+}
+
+
+TEST(Bits, SignedMulHighAndAdd32) {
+  TRACED_FORRANGE(int32_t, i, 1, 50) {
+    EXPECT_EQ(i, SignedMulHighAndAdd32(0, 0, i));
+    TRACED_FORRANGE(int32_t, j, 1, i) {
+      EXPECT_EQ(i, SignedMulHighAndAdd32(j, j, i));
+    }
+    EXPECT_EQ(i + 1, SignedMulHighAndAdd32(1024 * 1024 * 1024, 4, i));
+  }
+}
+
 }  // namespace bits
 }  // namespace base
 }  // namespace v8
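
The new tests pin down SignedMulHigh32 and SignedMulHighAndAdd32 as "high 32 bits of the signed 64-bit product" and "accumulate that high word, wrapping mod 2^32". A reference model consistent with those expectations (an assumption drawn from the tests, not a copy of the base/bits implementation; the shift relies on arithmetic right shift of negative values, as on the toolchains V8 targets):

    #include <cassert>
    #include <cstdint>

    int32_t SignedMulHigh32Ref(int32_t lhs, int32_t rhs) {
      // High 32 bits of the exact 64-bit signed product (what ARM SMMUL computes).
      return static_cast<int32_t>(
          (static_cast<int64_t>(lhs) * static_cast<int64_t>(rhs)) >> 32);
    }

    int32_t SignedMulHighAndAdd32Ref(int32_t lhs, int32_t rhs, int32_t acc) {
      // Accumulate with wraparound; the unsigned detour avoids signed overflow.
      return static_cast<int32_t>(
          static_cast<uint32_t>(acc) +
          static_cast<uint32_t>(SignedMulHigh32Ref(lhs, rhs)));
    }

    int main() {
      assert(SignedMulHigh32Ref(1024 * 1024 * 1024, 4) == 1);
      assert(SignedMulHigh32Ref(8 * 1024, 1024 * 1024) == 2);
      assert(SignedMulHighAndAdd32Ref(1024 * 1024 * 1024, 4, 5) == 6);
      return 0;
    }
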
diff --git a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 6f03889..b4c3145 100644
--- a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -30,6 +30,17 @@
 }
 
 
+struct Shift {
+  MachInst2 mi;
+  AddressingMode mode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Shift& shift) {
+  return os << shift.mi;
+}
+
+
 // Helper to build Int32Constant or Int64Constant depending on the given
 // machine type.
 Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
@@ -139,15 +150,23 @@
 
 
 // ARM64 shift instructions.
-static const MachInst2 kShiftInstructions[] = {
-    {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
-    {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
-    {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32, kMachInt32},
-    {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr, kMachInt64},
-    {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32, kMachInt32},
-    {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr, kMachInt64},
-    {&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
-    {&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64}};
+static const Shift kShiftInstructions[] = {
+    {{&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
+     kMode_Operand2_R_LSL_I},
+    {{&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
+     kMode_Operand2_R_LSL_I},
+    {{&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32, kMachInt32},
+     kMode_Operand2_R_LSR_I},
+    {{&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr, kMachInt64},
+     kMode_Operand2_R_LSR_I},
+    {{&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32, kMachInt32},
+     kMode_Operand2_R_ASR_I},
+    {{&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr, kMachInt64},
+     kMode_Operand2_R_ASR_I},
+    {{&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
+     kMode_Operand2_R_ROR_I},
+    {{&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64},
+     kMode_Operand2_R_ROR_I}};
 
 
 // ARM64 Mul/Div instructions.
@@ -296,6 +315,46 @@
 }
 
 
+TEST_P(InstructionSelectorLogicalTest, ShiftByImmediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test shifted operands whose width matches the instruction.
+    if (shift.mi.machine_type != type) continue;
+
+    TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+      StreamBuilder m(this, type, type, type);
+      m.Return((m.*dpi.constructor)(
+          m.Parameter(0),
+          (m.*shift.mi.constructor)(m.Parameter(1),
+                                    BuildConstant(m, type, imm))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+
+    TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+      StreamBuilder m(this, type, type, type);
+      m.Return((m.*dpi.constructor)(
+          (m.*shift.mi.constructor)(m.Parameter(1),
+                                    BuildConstant(m, type, imm)),
+          m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
 INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
                         ::testing::ValuesIn(kLogicalInstructions));
 
@@ -356,6 +415,37 @@
 }
 
 
+TEST_P(InstructionSelectorAddSubTest, ShiftByImmediateOnRight) {
+  const AddSub dpi = GetParam();
+  const MachineType type = dpi.mi.machine_type;
+  TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test shifted operands whose width matches the instruction.
+    if (shift.mi.machine_type != type) continue;
+
+    if ((shift.mi.arch_opcode == kArm64Ror32) ||
+        (shift.mi.arch_opcode == kArm64Ror)) {
+      // Not supported by add/sub instructions.
+      continue;
+    }
+
+    TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+      StreamBuilder m(this, type, type, type);
+      m.Return((m.*dpi.mi.constructor)(
+          m.Parameter(0),
+          (m.*shift.mi.constructor)(m.Parameter(1),
+                                    BuildConstant(m, type, imm))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
 INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
                         ::testing::ValuesIn(kAddSubInstructions));
 
@@ -455,6 +545,51 @@
 }
 
 
+TEST_F(InstructionSelectorTest, AddShiftByImmediateOnLeft) {
+  // 32-bit add.
+  TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test relevant shifted operands.
+    if (shift.mi.machine_type != kMachInt32) continue;
+    if (shift.mi.arch_opcode == kArm64Ror32) continue;
+
+    TRACED_FORRANGE(int, imm, 0, 31) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return((m.Int32Add)(
+          (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+          m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+
+  // 64-bit add.
+  TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+    // Only test relevant shifted operands.
+    if (shift.mi.machine_type != kMachInt64) continue;
+    if (shift.mi.arch_opcode == kArm64Ror) continue;
+
+    TRACED_FORRANGE(int, imm, 0, 63) {
+      StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+      m.Return((m.Int64Add)(
+          (m.*shift.mi.constructor)(m.Parameter(1), m.Int64Constant(imm)),
+          m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+      EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+      EXPECT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Data processing controlled branches.
 
@@ -818,32 +953,31 @@
 // Shift instructions.
 
 
-typedef InstructionSelectorTestWithParam<MachInst2>
-    InstructionSelectorShiftTest;
+typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
 
 
 TEST_P(InstructionSelectorShiftTest, Parameter) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
+  const Shift shift = GetParam();
+  const MachineType type = shift.mi.machine_type;
   StreamBuilder m(this, type, type, type);
-  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  m.Return((m.*shift.mi.constructor)(m.Parameter(0), m.Parameter(1)));
   Stream s = m.Build();
   ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(shift.mi.arch_opcode, s[0]->arch_opcode());
   EXPECT_EQ(2U, s[0]->InputCount());
   EXPECT_EQ(1U, s[0]->OutputCount());
 }
 
 
 TEST_P(InstructionSelectorShiftTest, Immediate) {
-  const MachInst2 dpi = GetParam();
-  const MachineType type = dpi.machine_type;
+  const Shift shift = GetParam();
+  const MachineType type = shift.mi.machine_type;
   TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
     StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    m.Return((m.*shift.mi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.mi.arch_opcode, s[0]->arch_opcode());
     EXPECT_EQ(2U, s[0]->InputCount());
     EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
     EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
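
The new ShiftByImmediate tests all check the same selector behaviour: when a shift-by-constant feeds a logical or add/sub node, the shift is folded into the instruction's addressing mode, giving one instruction with three inputs (two registers plus the shift amount) instead of two instructions, except that ROR is not a legal shifted operand for add/sub. A toy model of that decision (the names and types below are invented for illustration and are not the real InstructionSelector API):

    #include <cassert>

    enum ShiftKind { kNoShift, kLSL, kLSR, kASR, kROR };

    struct Selected {
      int input_count;         // 3 when the shift amount rides along as an input.
      ShiftKind folded_shift;  // becomes part of the addressing mode.
    };

    Selected SelectAddWithShiftedRhs(bool rhs_is_shift_by_immediate,
                                     ShiftKind rhs_shift) {
      if (rhs_is_shift_by_immediate && rhs_shift != kROR) {
        return {3, rhs_shift};  // e.g. add w0, w1, w2, lsl #3
      }
      return {2, kNoShift};     // shift stays a separate instruction.
    }

    int main() {
      assert(SelectAddWithShiftedRhs(true, kLSL).input_count == 3);
      assert(SelectAddWithShiftedRhs(true, kROR).input_count == 2);
      assert(SelectAddWithShiftedRhs(false, kNoShift).input_count == 2);
      return 0;
    }
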
diff --git a/test/unittests/compiler/mips/OWNERS b/test/unittests/compiler/mips/OWNERS
new file mode 100644
index 0000000..5508ba6
--- /dev/null
+++ b/test/unittests/compiler/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/tools/push-to-trunk/chromium_roll.py b/tools/push-to-trunk/chromium_roll.py
index ceedbc1..5c9a38e 100755
--- a/tools/push-to-trunk/chromium_roll.py
+++ b/tools/push-to-trunk/chromium_roll.py
@@ -24,7 +24,6 @@
   def RunStep(self):
     self["last_push"] = self._options.last_push or self.FindLastTrunkPush(
         branch="origin/candidates", include_patches=True)
-    self["trunk_revision"] = self.GetCommitPositionNumber(self["last_push"])
     self["push_title"] = self.GitLog(n=1, format="%s",
                                      git_hash=self["last_push"])
 
@@ -56,7 +55,7 @@
     # Update v8 remotes.
     self.GitFetchOrigin()
 
-    self.GitCreateBranch("v8-roll-%s" % self["trunk_revision"],
+    self.GitCreateBranch("v8-roll-%s" % self["last_push"],
                          cwd=self._options.chromium)
 
 
@@ -66,9 +65,9 @@
   def RunStep(self):
     # Patch DEPS file.
     if self.Command(
-        "roll-dep", "v8 %s" % self["trunk_revision"],
+        "roll-dep", "v8 %s" % self["last_push"],
         cwd=self._options.chromium) is None:
-      self.Die("Failed to create deps for %s" % self["trunk_revision"])
+      self.Die("Failed to create deps for %s" % self["last_push"])
 
     commit_title = "Update V8 to %s." % self["push_title"].lower()
     sheriff = ""
@@ -87,7 +86,7 @@
       print "CL uploaded."
     else:
       self.GitCheckout("master", cwd=self._options.chromium)
-      self.GitDeleteBranch("v8-roll-%s" % self["trunk_revision"],
+      self.GitDeleteBranch("v8-roll-%s" % self["last_push"],
                            cwd=self._options.chromium)
       print "Dry run - don't upload."
 
@@ -105,9 +104,9 @@
   MESSAGE = "Done!"
 
   def RunStep(self):
-    print("Congratulations, you have successfully rolled the push r%s it into "
+    print("Congratulations, you have successfully rolled %s into "
           "Chromium. Please don't forget to update the v8rel spreadsheet."
-          % self["trunk_revision"])
+          % self["last_push"])
 
     # Clean up all temporary files.
     Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
diff --git a/tools/push-to-trunk/test_scripts.py b/tools/push-to-trunk/test_scripts.py
index ee7a98d..3800b08 100644
--- a/tools/push-to-trunk/test_scripts.py
+++ b/tools/push-to-trunk/test_scripts.py
@@ -1006,7 +1006,6 @@
       Cmd(("git log -1 --format=%H --grep="
            "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
            "origin/candidates"), "push_hash\n"),
-      Cmd("git log -1 --format=%B push_hash", self.C_V8_22624_LOG),
       Cmd("git log -1 --format=%s push_hash",
           "Version 3.22.5 (based on bleeding_edge revision r22622)\n"),
       URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
@@ -1016,8 +1015,8 @@
       Cmd("gclient sync --nohooks", "syncing...", cwd=chrome_dir),
       Cmd("git pull", "", cwd=chrome_dir),
       Cmd("git fetch origin", ""),
-      Cmd("git checkout -b v8-roll-22624", "", cwd=chrome_dir),
-      Cmd("roll-dep v8 22624", "rolled", cb=WriteDeps, cwd=chrome_dir),
+      Cmd("git checkout -b v8-roll-push_hash", "", cwd=chrome_dir),
+      Cmd("roll-dep v8 push_hash", "rolled", cb=WriteDeps, cwd=chrome_dir),
       Cmd(("git commit -am \"Update V8 to version 3.22.5 "
            "(based on bleeding_edge revision r22622).\n\n"
            "Please reply to the V8 sheriff c_name@chromium.org in "