Merge v8 from https://chromium.googlesource.com/v8/v8.git at 78b694ed1084696cb7b8c01ec78e79ff24e255e8

This commit was generated by merge_from_chromium.py.

Change-Id: I2922e7b2582bbe21f5993a88992442fd4b4e1d5b
diff --git a/BUILD.gn b/BUILD.gn
index 7d6bf41..3a9fa66 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -550,7 +550,6 @@
     "src/compiler/node-aux-data.h",
     "src/compiler/node-cache.cc",
     "src/compiler/node-cache.h",
-    "src/compiler/node-matchers.cc",
     "src/compiler/node-matchers.h",
     "src/compiler/node-properties-inl.h",
     "src/compiler/node-properties.h",
diff --git a/ChangeLog b/ChangeLog
index 4af59b9..54c3456 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2014-10-31: Version 3.30.23
+
+        Introduce v8::Exception::GetMessage to find the location of an error
+        object (Chromium issue 427954).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-10-30: Version 3.30.22
 
         MIPS: Classes: Add super support in methods and accessors (issue 3330).
diff --git a/Makefile b/Makefile
index e96362e..3b02f52 100644
--- a/Makefile
+++ b/Makefile
@@ -257,7 +257,7 @@
 ENVFILE = $(OUTDIR)/environment
 
 .PHONY: all check clean builddeps dependencies $(ENVFILE).new native \
-        qc quickcheck $(QUICKCHECKS) \
+        qc quickcheck $(QUICKCHECKS) turbocheck \
         $(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
         $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
         $(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
@@ -386,6 +386,15 @@
 	    --arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
 qc: quickcheck
 
+turbocheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
+	tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --arch-and-mode=$(SUPERFASTTESTMODES) $(TESTFLAGS) \
+	    --quickcheck --variants=turbofan --download-data mozilla webkit
+	tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+	    --arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) \
+	    --quickcheck --variants=turbofan
+tc: turbocheck
+
 # Clean targets. You can clean each architecture individually, or everything.
 $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
 	rm -f $(OUTDIR)/Makefile.$(basename $@)*
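For reference, a usage sketch of the new target (assumed invocation; the actual
arch/mode sets come from SUPERFASTTESTMODES and FASTTESTMODES above):

    make turbocheck   # run the fast test suites against the turbofan variant
    make tc           # shorthand alias, by analogy with qc -> quickcheck
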
diff --git a/include/v8.h b/include/v8.h
index e783727..8df90d2 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -4164,6 +4164,9 @@
   static Local<Value> TypeError(Handle<String> message);
   static Local<Value> Error(Handle<String> message);
 
+  static Local<Message> GetMessage(Handle<Value> exception);
+
+  // DEPRECATED. Use GetMessage()->GetStackTrace()
   static Local<StackTrace> GetStackTrace(Handle<Value> exception);
 };
 
@@ -4224,6 +4227,8 @@
   V8_INLINE Handle<Promise> GetPromise() const { return promise_; }
   V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
   V8_INLINE Handle<Value> GetValue() const { return value_; }
+
+  // DEPRECATED. Use v8::Exception::GetMessage(GetValue())->GetStackTrace()
   V8_INLINE Handle<StackTrace> GetStackTrace() const { return stack_trace_; }
 
  private:
@@ -5505,9 +5510,17 @@
    * all TryCatch blocks should be stack allocated because the memory
    * location itself is compared against JavaScript try/catch blocks.
    */
+  // TODO(dcarney): deprecate.
   TryCatch();
 
   /**
+   * Creates a new try/catch block and registers it with v8.  Note that
+   * all TryCatch blocks should be stack allocated because the memory
+   * location itself is compared against JavaScript try/catch blocks.
+   */
+  TryCatch(Isolate* isolate);
+
+  /**
    * Unregisters and deletes this try/catch block.
    */
   ~TryCatch();
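A minimal embedder-side sketch of the additions above (hedged: RunScript is a
hypothetical helper, and isolate/context are assumed to already exist):

    // Exercises the new isolate-taking TryCatch constructor and
    // v8::Exception::GetMessage (Chromium issue 427954).
    v8::TryCatch try_catch(isolate);
    v8::Local<v8::Value> result = RunScript(context, "throw new Error('boom')");
    if (try_catch.HasCaught()) {
      v8::Local<v8::Message> message =
          v8::Exception::GetMessage(try_catch.Exception());
      if (!message.IsEmpty()) {
        int line = message->GetLineNumber();  // location of the error object
        (void)line;
      }
    }
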
diff --git a/src/api.cc b/src/api.cc
index 6a7c40a..5abd0eb 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -730,11 +730,11 @@
 // about this there is no HandleScope in this method.  When you add one to the
 // site calling this method you should check that you ensured the VM was not
 // dead first.
-void NeanderArray::add(i::Handle<i::Object> value) {
+void NeanderArray::add(i::Isolate* isolate, i::Handle<i::Object> value) {
   int length = this->length();
   int size = obj_.size();
   if (length == size - 1) {
-    i::Factory* factory = i::Isolate::Current()->factory();
+    i::Factory* factory = isolate->factory();
     i::Handle<i::FixedArray> new_elms = factory->NewFixedArray(2 * size);
     for (int i = 0; i < length; i++)
       new_elms->set(i + 1, get(i));
@@ -769,12 +769,12 @@
     Utils::OpenHandle(templ)->set_property_list(*list);
   }
   NeanderArray array(list);
-  array.add(isolate->factory()->NewNumberFromInt(length));
+  array.add(isolate, isolate->factory()->NewNumberFromInt(length));
   for (int i = 0; i < length; i++) {
     i::Handle<i::Object> value = data[i].IsEmpty() ?
         i::Handle<i::Object>(isolate->factory()->undefined_value()) :
         Utils::OpenHandle(*data[i]);
-    array.add(value);
+    array.add(isolate, value);
   }
 }
 
@@ -782,7 +782,7 @@
 void Template::Set(v8::Handle<Name> name,
                    v8::Handle<Data> value,
                    v8::PropertyAttribute attribute) {
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   const int kSize = 3;
@@ -1064,11 +1064,9 @@
 
 
 int TypeSwitch::match(v8::Handle<Value> value) {
-  i::Isolate* isolate = i::Isolate::Current();
-  LOG_API(isolate, "TypeSwitch::match");
-  USE(isolate);
-  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
   i::Handle<i::TypeSwitchInfo> info = Utils::OpenHandle(this);
+  LOG_API(info->GetIsolate(), "TypeSwitch::match");
+  i::Handle<i::Object> obj = Utils::OpenHandle(*value);
   i::FixedArray* types = i::FixedArray::cast(info->types());
   for (int i = 0; i < types->length(); i++) {
     if (i::FunctionTemplateInfo::cast(types->get(i))->IsTemplateFor(*obj))
@@ -1272,7 +1270,7 @@
     info->set_property_accessors(*list);
   }
   NeanderArray array(list);
-  array.add(obj);
+  array.add(isolate, obj);
 }
 
 
@@ -1900,8 +1898,24 @@
 }
 
 
+v8::TryCatch::TryCatch(v8::Isolate* isolate)
+    : isolate_(reinterpret_cast<i::Isolate*>(isolate)),
+      next_(isolate_->try_catch_handler()),
+      is_verbose_(false),
+      can_continue_(true),
+      capture_message_(true),
+      rethrow_(false),
+      has_terminated_(false) {
+  ResetInternal();
+  // Special handling for simulators which have a separate JS stack.
+  js_stack_comparable_address_ =
+      reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
+          v8::internal::GetCurrentStackPosition()));
+  isolate_->RegisterTryCatchHandler(this);
+}
+
+
 v8::TryCatch::~TryCatch() {
-  DCHECK(isolate_ == i::Isolate::Current());
   if (rethrow_) {
     v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_);
     v8::HandleScope scope(isolate);
@@ -1954,7 +1968,6 @@
 
 
 v8::Local<Value> v8::TryCatch::Exception() const {
-  DCHECK(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     // Check for out of memory exception.
     i::Object* exception = reinterpret_cast<i::Object*>(exception_);
@@ -1966,7 +1979,6 @@
 
 
 v8::Local<Value> v8::TryCatch::StackTrace() const {
-  DCHECK(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
     if (!raw_obj->IsJSObject()) return v8::Local<Value>();
@@ -1990,7 +2002,6 @@
 
 
 v8::Local<v8::Message> v8::TryCatch::Message() const {
-  DCHECK(isolate_ == i::Isolate::Current());
   i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
   DCHECK(message->IsJSMessageObject() || message->IsTheHole());
   if (HasCaught() && !message->IsTheHole()) {
@@ -2002,7 +2013,6 @@
 
 
 void v8::TryCatch::Reset() {
-  DCHECK(isolate_ == i::Isolate::Current());
   if (!rethrow_ && HasCaught() && isolate_->has_scheduled_exception()) {
     // If an exception was caught but is still scheduled because no API call
     // promoted it, then it is canceled to prevent it from being propagated.
@@ -2090,11 +2100,8 @@
 
 
 MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
-    const char* name,
-    i::Handle<i::Object> recv,
-    int argc,
+    i::Isolate* isolate, const char* name, i::Handle<i::Object> recv, int argc,
     i::Handle<i::Object> argv[]) {
-  i::Isolate* isolate = i::Isolate::Current();
   i::Handle<i::Object> object_fun =
       i::Object::GetProperty(
           isolate, isolate->js_builtins_object(), name).ToHandleChecked();
@@ -2104,13 +2111,10 @@
 
 
 MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
-    const char* name,
-    i::Handle<i::Object> data) {
+    i::Isolate* isolate, const char* name, i::Handle<i::Object> data) {
   i::Handle<i::Object> argv[] = { data };
-  return CallV8HeapFunction(name,
-                            i::Isolate::Current()->js_builtins_object(),
-                            arraysize(argv),
-                            argv);
+  return CallV8HeapFunction(isolate, name, isolate->js_builtins_object(),
+                            arraysize(argv), argv);
 }
 
 
@@ -2122,8 +2126,9 @@
 
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result;
-  has_pending_exception = !CallV8HeapFunction(
-      "GetLineNumber", Utils::OpenHandle(this)).ToHandle(&result);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "GetLineNumber", Utils::OpenHandle(this))
+           .ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, 0);
   return static_cast<int>(result->Number());
 }
@@ -2157,8 +2162,9 @@
   i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> start_col_obj;
-  has_pending_exception = !CallV8HeapFunction(
-      "GetPositionInLine", data_obj).ToHandle(&start_col_obj);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "GetPositionInLine", data_obj)
+           .ToHandle(&start_col_obj);
   EXCEPTION_BAILOUT_CHECK(isolate, 0);
   return static_cast<int>(start_col_obj->Number());
 }
@@ -2172,8 +2178,9 @@
   i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> start_col_obj;
-  has_pending_exception = !CallV8HeapFunction(
-      "GetPositionInLine", data_obj).ToHandle(&start_col_obj);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "GetPositionInLine", data_obj)
+           .ToHandle(&start_col_obj);
   EXCEPTION_BAILOUT_CHECK(isolate, 0);
   i::Handle<i::JSMessageObject> message =
       i::Handle<i::JSMessageObject>::cast(data_obj);
@@ -2203,8 +2210,9 @@
   EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result;
-  has_pending_exception = !CallV8HeapFunction(
-      "GetSourceLine", Utils::OpenHandle(this)).ToHandle(&result);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "GetSourceLine", Utils::OpenHandle(this))
+           .ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>());
   if (result->IsString()) {
     return scope.Escape(Utils::ToLocal(i::Handle<i::String>::cast(result)));
@@ -3021,8 +3029,9 @@
   i::Handle<i::Object> args[] = { other };
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result;
-  has_pending_exception = !CallV8HeapFunction(
-      "EQUALS", obj, arraysize(args), args).ToHandle(&result);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "EQUALS", obj, arraysize(args), args)
+           .ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, false);
   return *result == i::Smi::FromInt(i::EQUAL);
 }
@@ -3287,11 +3296,10 @@
   i::Handle<i::Object> args[] = { obj, key_name };
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result;
-  has_pending_exception = !CallV8HeapFunction(
-      "ObjectGetOwnPropertyDescriptor",
-      isolate->factory()->undefined_value(),
-      arraysize(args),
-      args).ToHandle(&result);
+  has_pending_exception =
+      !CallV8HeapFunction(isolate, "ObjectGetOwnPropertyDescriptor",
+                          isolate->factory()->undefined_value(),
+                          arraysize(args), args).ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
   return Utils::ToLocal(result);
 }
@@ -5267,25 +5275,25 @@
 
 
 void v8::Context::SetSecurityToken(Handle<Value> token) {
-  i::Isolate* isolate = i::Isolate::Current();
-  ENTER_V8(isolate);
   i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Isolate* isolate = env->GetIsolate();
+  ENTER_V8(isolate);
   i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
   env->set_security_token(*token_handle);
 }
 
 
 void v8::Context::UseDefaultSecurityToken() {
-  i::Isolate* isolate = i::Isolate::Current();
-  ENTER_V8(isolate);
   i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Isolate* isolate = env->GetIsolate();
+  ENTER_V8(isolate);
   env->set_security_token(env->global_object());
 }
 
 
 Handle<Value> v8::Context::GetSecurityToken() {
-  i::Isolate* isolate = i::Isolate::Current();
   i::Handle<i::Context> env = Utils::OpenHandle(this);
+  i::Isolate* isolate = env->GetIsolate();
   i::Object* security_token = env->security_token();
   i::Handle<i::Object> token_handle(security_token, isolate);
   return Utils::ToLocal(token_handle);
@@ -5344,40 +5352,42 @@
 
 
 Local<v8::Object> ObjectTemplate::NewInstance() {
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Handle<i::ObjectTemplateInfo> info = Utils::OpenHandle(this);
+  i::Isolate* isolate = info->GetIsolate();
   ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
              return Local<v8::Object>());
   LOG_API(isolate, "ObjectTemplate::NewInstance");
   ENTER_V8(isolate);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> obj;
-  has_pending_exception = !i::Execution::InstantiateObject(
-      Utils::OpenHandle(this)).ToHandle(&obj);
+  has_pending_exception = !i::Execution::InstantiateObject(info).ToHandle(&obj);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
   return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
 }
 
 
 Local<v8::Function> FunctionTemplate::GetFunction() {
-  i::Isolate* isolate = i::Isolate::Current();
+  i::Handle<i::FunctionTemplateInfo> info = Utils::OpenHandle(this);
+  i::Isolate* isolate = info->GetIsolate();
   ON_BAILOUT(isolate, "v8::FunctionTemplate::GetFunction()",
              return Local<v8::Function>());
   LOG_API(isolate, "FunctionTemplate::GetFunction");
   ENTER_V8(isolate);
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> obj;
-  has_pending_exception = !i::Execution::InstantiateFunction(
-      Utils::OpenHandle(this)).ToHandle(&obj);
+  has_pending_exception =
+      !i::Execution::InstantiateFunction(info).ToHandle(&obj);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Function>());
   return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
 }
 
 
 bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
-  ON_BAILOUT(i::Isolate::Current(), "v8::FunctionTemplate::HasInstanceOf()",
-             return false);
+  i::Handle<i::FunctionTemplateInfo> info = Utils::OpenHandle(this);
+  i::Isolate* isolate = info->GetIsolate();
+  ON_BAILOUT(isolate, "v8::FunctionTemplate::HasInstanceOf()", return false);
   i::Object* obj = *Utils::OpenHandle(*value);
-  return Utils::OpenHandle(this)->IsTemplateFor(obj);
+  return info->IsTemplateFor(obj);
 }
 
 
@@ -6807,7 +6817,7 @@
   obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
   obj.set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
                             : *Utils::OpenHandle(*data));
-  listeners.add(obj.value());
+  listeners.add(isolate, obj.value());
   return true;
 }
 
@@ -6961,6 +6971,17 @@
 #undef DEFINE_ERROR
 
 
+Local<Message> Exception::GetMessage(Handle<Value> exception) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
+  if (!obj->IsHeapObject()) return Local<Message>();
+  i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  return Utils::MessageToLocal(
+      scope.CloseAndEscape(isolate->CreateMessage(obj, NULL)));
+}
+
+
 Local<StackTrace> Exception::GetStackTrace(Handle<Value> exception) {
   i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
   if (!obj->IsJSObject()) return Local<StackTrace>();
diff --git a/src/api.h b/src/api.h
index 1dc8947..1d2a8c8 100644
--- a/src/api.h
+++ b/src/api.h
@@ -54,7 +54,8 @@
     return obj_.value();
   }
 
-  void add(v8::internal::Handle<v8::internal::Object> value);
+  void add(internal::Isolate* isolate,
+           v8::internal::Handle<v8::internal::Object> value);
 
   int length();
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 90018ea..17bf4f9 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -3094,6 +3094,76 @@
 }
 
 
+void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
+  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+  // 10(19-18) | RM=00(17-16) |  Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
+  // M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
+       0x5 * B9 | B8 | B6 | m * B5 | vm);
+}
+
+
+void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
+  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+  // 10(19-18) | RM=01(17-16) |  Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
+  // M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
+       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
+}
+
+
+void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
+  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+  // 10(19-18) | RM=10(17-16) |  Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
+  // M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
+       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
+}
+
+
+void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
+  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
+  // 10(19-18) | RM=11(17-16) |  Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
+  // M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
+       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
+}
+
+
+void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
+                       const Condition cond) {
+  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
+  // Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  int vd, d;
+  dst.split_code(&vd, &d);
+  int vm, m;
+  src.split_code(&vm, &m);
+  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
+       0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
+}
+
+
 // Support for NEON.
 
 void Assembler::vld1(NeonSize size,
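As a sanity check on the encoding comments above, a standalone sketch (not part
of the merge; assumes kSpecialCondition = 15 << 28 and Bn = 1 << n, as defined
in src/arm/constants-arm.h) that reproduces the expected instruction word for
vrinta.f64 d0, d1:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kSpecialCondition = 15u << 28;
      const uint32_t B23 = 1u << 23, B22 = 1u << 22, B20 = 1u << 20,
                     B19 = 1u << 19, B12 = 1u << 12, B9 = 1u << 9,
                     B8 = 1u << 8, B6 = 1u << 6, B5 = 1u << 5;
      const int vd = 0, d = 0;  // dst = d0
      const int vm = 1, m = 0;  // src = d1
      uint32_t instr = kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 |
                       B19 | vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm;
      std::printf("vrinta.f64 d0, d1 -> 0x%08x\n", instr);  // 0xfeb80b41
      return 0;
    }
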
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index f78cc50..9087fab 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1279,6 +1279,14 @@
              const DwVfpRegister src,
              const Condition cond = al);
 
+  // ARMv8 rounding instructions.
+  void vrinta(const DwVfpRegister dst, const DwVfpRegister src);
+  void vrintn(const DwVfpRegister dst, const DwVfpRegister src);
+  void vrintm(const DwVfpRegister dst, const DwVfpRegister src);
+  void vrintp(const DwVfpRegister dst, const DwVfpRegister src);
+  void vrintz(const DwVfpRegister dst, const DwVfpRegister src,
+              const Condition cond = al);
+
   // Support for NEON.
   // All these APIs support D0 to D31 and Q0 to Q15.
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 375ef89..2a293b3 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -161,26 +161,26 @@
 
 // Instruction encoding bits and masks.
 enum {
-  H   = 1 << 5,   // Halfword (or byte).
-  S6  = 1 << 6,   // Signed (or unsigned).
-  L   = 1 << 20,  // Load (or store).
-  S   = 1 << 20,  // Set condition code (or leave unchanged).
-  W   = 1 << 21,  // Writeback base register (or leave unchanged).
-  A   = 1 << 21,  // Accumulate in multiply instruction (or not).
-  B   = 1 << 22,  // Unsigned byte (or word).
-  N   = 1 << 22,  // Long (or short).
-  U   = 1 << 23,  // Positive (or negative) offset/index.
-  P   = 1 << 24,  // Offset/pre-indexed addressing (or post-indexed addressing).
-  I   = 1 << 25,  // Immediate shifter operand (or not).
-
-  B4  = 1 << 4,
-  B5  = 1 << 5,
-  B6  = 1 << 6,
-  B7  = 1 << 7,
-  B8  = 1 << 8,
-  B9  = 1 << 9,
+  H = 1 << 5,   // Halfword (or byte).
+  S6 = 1 << 6,  // Signed (or unsigned).
+  L = 1 << 20,  // Load (or store).
+  S = 1 << 20,  // Set condition code (or leave unchanged).
+  W = 1 << 21,  // Writeback base register (or leave unchanged).
+  A = 1 << 21,  // Accumulate in multiply instruction (or not).
+  B = 1 << 22,  // Unsigned byte (or word).
+  N = 1 << 22,  // Long (or short).
+  U = 1 << 23,  // Positive (or negative) offset/index.
+  P = 1 << 24,  // Offset/pre-indexed addressing (or post-indexed addressing).
+  I = 1 << 25,  // Immediate shifter operand (or not).
+  B4 = 1 << 4,
+  B5 = 1 << 5,
+  B6 = 1 << 6,
+  B7 = 1 << 7,
+  B8 = 1 << 8,
+  B9 = 1 << 9,
   B12 = 1 << 12,
   B16 = 1 << 16,
+  B17 = 1 << 17,
   B18 = 1 << 18,
   B19 = 1 << 19,
   B20 = 1 << 20,
@@ -194,16 +194,16 @@
   B28 = 1 << 28,
 
   // Instruction bit masks.
-  kCondMask   = 15 << 28,
-  kALUMask    = 0x6f << 21,
-  kRdMask     = 15 << 12,  // In str instruction.
+  kCondMask = 15 << 28,
+  kALUMask = 0x6f << 21,
+  kRdMask = 15 << 12,  // In str instruction.
   kCoprocessorMask = 15 << 8,
   kOpCodeMask = 15 << 21,  // In data-processing instructions.
-  kImm24Mask  = (1 << 24) - 1,
-  kImm16Mask  = (1 << 16) - 1,
-  kImm8Mask  = (1 << 8) - 1,
-  kOff12Mask  = (1 << 12) - 1,
-  kOff8Mask  = (1 << 8) - 1
+  kImm24Mask = (1 << 24) - 1,
+  kImm16Mask = (1 << 16) - 1,
+  kImm8Mask = (1 << 8) - 1,
+  kOff12Mask = (1 << 12) - 1,
+  kOff8Mask = (1 << 8) - 1
 };
 
 
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 2f3a9c7..dc26018 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1277,6 +1277,14 @@
         } else {
           Unknown(instr);  // Not used by V8.
         }
+      } else if ((instr->Opc2Value() == 0x6) && (instr->Opc3Value() == 0x3)) {
+        bool dp_operation = (instr->SzValue() == 1);
+        // vrintz - round towards zero (truncate)
+        if (dp_operation) {
+          Format(instr, "vrintz'cond.f64.f64 'Dd, 'Dm");
+        } else {
+          Unknown(instr);  // Not used by V8.
+        }
       } else {
         Unknown(instr);  // Not used by V8.
       }
@@ -1627,6 +1635,50 @@
         Unknown(instr);
       }
       break;
+    case 0x1D:
+      if (instr->Opc1Value() == 0x7 && instr->Bits(19, 18) == 0x2 &&
+          instr->Bits(11, 9) == 0x5 && instr->Bits(7, 6) == 0x1 &&
+          instr->Bit(4) == 0x0) {
+        // VRINTA, VRINTN, VRINTP, VRINTM (floating-point)
+        bool dp_operation = (instr->SzValue() == 1);
+        int rounding_mode = instr->Bits(17, 16);
+        switch (rounding_mode) {
+          case 0x0:
+            if (dp_operation) {
+              Format(instr, "vrinta.f64.f64 'Dd, 'Dm");
+            } else {
+              Unknown(instr);
+            }
+            break;
+          case 0x1:
+            if (dp_operation) {
+              Format(instr, "vrintn.f64.f64 'Dd, 'Dm");
+            } else {
+              Unknown(instr);
+            }
+            break;
+          case 0x2:
+            if (dp_operation) {
+              Format(instr, "vrintp.f64.f64 'Dd, 'Dm");
+            } else {
+              Unknown(instr);
+            }
+            break;
+          case 0x3:
+            if (dp_operation) {
+              Format(instr, "vrintm.f64.f64 'Dd, 'Dm");
+            } else {
+              Unknown(instr);
+            }
+            break;
+          default:
+            UNREACHABLE();  // Case analysis is exhaustive.
+            break;
+        }
+      } else {
+        Unknown(instr);
+      }
+      break;
     default:
       Unknown(instr);
       break;
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index ceabe78..aeb35c8 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2957,6 +2957,12 @@
         } else {
           UNREACHABLE();  // Not used by v8.
         }
+      } else if ((instr->Opc2Value() == 0x6) && (instr->Opc3Value() == 0x3)) {
+        // vrintz - truncate
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = std::trunc(dm_value);
+        dd_value = canonicalizeNaN(dd_value);
+        set_d_register_from_double(vd, dd_value);
       } else {
         UNREACHABLE();  // Not used by V8.
       }
@@ -3607,6 +3613,50 @@
         UNIMPLEMENTED();
       }
       break;
+    case 0x1D:
+      if (instr->Opc1Value() == 0x7 && instr->Opc3Value() == 0x1 &&
+          instr->Bits(11, 9) == 0x5 && instr->Bits(19, 18) == 0x2 &&
+          instr->Bit(8) == 0x1) {
+        int vm = instr->VFPMRegValue(kDoublePrecision);
+        int vd = instr->VFPDRegValue(kDoublePrecision);
+        double dm_value = get_double_from_d_register(vm);
+        double dd_value = 0.0;
+        int rounding_mode = instr->Bits(17, 16);
+        switch (rounding_mode) {
+          case 0x0:  // vrinta - round with ties away from zero
+            dd_value = std::round(dm_value);
+            break;
+          case 0x1: {  // vrintn - round with ties to even
+            dd_value = std::floor(dm_value);
+            double error = dm_value - dd_value;
+            // Take care of correctly handling the range [-0.5, -0.0], which
+            // must yield -0.0.
+            if ((-0.5 <= dm_value) && (dm_value < 0.0)) {
+              dd_value = -0.0;
+              // If the error is greater than 0.5, or is equal to 0.5 and the
+              // integer result is odd, round up.
+            } else if ((error > 0.5) ||
+                       ((error == 0.5) && (fmod(dd_value, 2) != 0))) {
+              dd_value++;
+            }
+            break;
+          }
+          case 0x2:  // vrintp - ceil
+            dd_value = std::ceil(dm_value);
+            break;
+          case 0x3:  // vrintm - floor
+            dd_value = std::floor(dm_value);
+            break;
+          default:
+            UNREACHABLE();  // Case analysis is exhaustive.
+            break;
+        }
+        dd_value = canonicalizeNaN(dd_value);
+        set_d_register_from_double(vd, dd_value);
+      } else {
+        UNIMPLEMENTED();
+      }
+      break;
     default:
       UNIMPLEMENTED();
       break;
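A hedged standalone sketch of the vrintn (round to nearest, ties to even) logic
simulated above, with spot checks of its edge cases:

    #include <cassert>
    #include <cmath>

    // Mirrors the simulator's round-to-nearest, ties-to-even computation.
    static double RoundTiesToEven(double x) {
      double r = std::floor(x);
      double error = x - r;
      if (-0.5 <= x && x < 0.0) {
        r = -0.0;  // the range [-0.5, -0.0] must yield -0.0
      } else if (error > 0.5 || (error == 0.5 && std::fmod(r, 2) != 0)) {
        r++;
      }
      return r;
    }

    int main() {
      assert(RoundTiesToEven(0.5) == 0.0);   // tie rounds to even
      assert(RoundTiesToEven(1.5) == 2.0);
      assert(RoundTiesToEven(2.5) == 2.0);
      assert(RoundTiesToEven(-1.5) == -2.0);
      assert(std::signbit(RoundTiesToEven(-0.25)));  // yields -0.0
      return 0;
    }
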
diff --git a/src/ast-value-factory.cc b/src/ast-value-factory.cc
index 0a1949a..895ce39 100644
--- a/src/ast-value-factory.cc
+++ b/src/ast-value-factory.cc
@@ -159,9 +159,6 @@
       return DoubleToBoolean(number_);
     case SMI:
       return smi_ != 0;
-    case STRING_ARRAY:
-      UNREACHABLE();
-      break;
     case BOOLEAN:
       return bool_;
     case NULL_TYPE:
@@ -202,22 +199,6 @@
         value_ = isolate->factory()->false_value();
       }
       break;
-    case STRING_ARRAY: {
-      DCHECK(strings_ != NULL);
-      Factory* factory = isolate->factory();
-      int len = strings_->length();
-      Handle<FixedArray> elements = factory->NewFixedArray(len, TENURED);
-      for (int i = 0; i < len; i++) {
-        const AstRawString* string = (*strings_)[i];
-        Handle<Object> element = string->string();
-        // Strings are already internalized.
-        DCHECK(!element.is_null());
-        elements->set(i, *element);
-      }
-      value_ =
-          factory->NewJSArrayWithElements(elements, FAST_ELEMENTS, TENURED);
-      break;
-    }
     case NULL_TYPE:
       value_ = isolate->factory()->null_value();
       break;
@@ -350,17 +331,6 @@
 }
 
 
-const AstValue* AstValueFactory::NewStringList(
-    ZoneList<const AstRawString*>* strings) {
-  AstValue* value = new (zone_) AstValue(strings);
-  if (isolate_) {
-    value->Internalize(isolate_);
-  }
-  values_.Add(value);
-  return value;
-}
-
-
 const AstValue* AstValueFactory::NewNull() {
   GENERATE_VALUE_GETTER(null_value_, AstValue::NULL_TYPE);
 }
diff --git a/src/ast-value-factory.h b/src/ast-value-factory.h
index 774e534..071ca9c 100644
--- a/src/ast-value-factory.h
+++ b/src/ast-value-factory.h
@@ -194,7 +194,6 @@
     NUMBER,
     SMI,
     BOOLEAN,
-    STRING_ARRAY,
     NULL_TYPE,
     UNDEFINED,
     THE_HOLE
@@ -213,10 +212,6 @@
 
   explicit AstValue(bool b) : type_(BOOLEAN) { bool_ = b; }
 
-  explicit AstValue(ZoneList<const AstRawString*>* s) : type_(STRING_ARRAY) {
-    strings_ = s;
-  }
-
   explicit AstValue(Type t) : type_(t) {
     DCHECK(t == NULL_TYPE || t == UNDEFINED || t == THE_HOLE);
   }
@@ -239,33 +234,33 @@
 
 
 // For generating constants.
-#define STRING_CONSTANTS(F)                           \
-  F(anonymous_function, "(anonymous function)")       \
-  F(arguments, "arguments")                           \
-  F(constructor, "constructor")                       \
-  F(done, "done")                                     \
-  F(dot, ".")                                         \
-  F(dot_for, ".for")                                  \
-  F(dot_generator, ".generator")                      \
-  F(dot_generator_object, ".generator_object")        \
-  F(dot_iterator, ".iterator")                        \
-  F(dot_module, ".module")                            \
-  F(dot_result, ".result")                            \
-  F(empty, "")                                        \
-  F(eval, "eval")                                     \
-  F(initialize_const_global, "initializeConstGlobal") \
-  F(initialize_var_global, "initializeVarGlobal")     \
-  F(make_reference_error, "MakeReferenceError")       \
-  F(make_syntax_error, "MakeSyntaxError")             \
-  F(make_type_error, "MakeTypeError")                 \
-  F(module, "module")                                 \
-  F(native, "native")                                 \
-  F(next, "next")                                     \
-  F(proto, "__proto__")                               \
-  F(prototype, "prototype")                           \
-  F(this, "this")                                     \
-  F(use_asm, "use asm")                               \
-  F(use_strict, "use strict")                         \
+#define STRING_CONSTANTS(F)                             \
+  F(anonymous_function, "(anonymous function)")         \
+  F(arguments, "arguments")                             \
+  F(constructor, "constructor")                         \
+  F(done, "done")                                       \
+  F(dot, ".")                                           \
+  F(dot_for, ".for")                                    \
+  F(dot_generator, ".generator")                        \
+  F(dot_generator_object, ".generator_object")          \
+  F(dot_iterator, ".iterator")                          \
+  F(dot_module, ".module")                              \
+  F(dot_result, ".result")                              \
+  F(empty, "")                                          \
+  F(eval, "eval")                                       \
+  F(initialize_const_global, "initializeConstGlobal")   \
+  F(initialize_var_global, "initializeVarGlobal")       \
+  F(make_reference_error, "MakeReferenceErrorEmbedded") \
+  F(make_syntax_error, "MakeSyntaxErrorEmbedded")       \
+  F(make_type_error, "MakeTypeErrorEmbedded")           \
+  F(module, "module")                                   \
+  F(native, "native")                                   \
+  F(next, "next")                                       \
+  F(proto, "__proto__")                                 \
+  F(prototype, "prototype")                             \
+  F(this, "this")                                       \
+  F(use_asm, "use asm")                                 \
+  F(use_strict, "use strict")                           \
   F(value, "value")
 
 #define OTHER_CONSTANTS(F) \
diff --git a/src/ast.h b/src/ast.h
index 1c1b95b..6b11d79 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -3415,13 +3415,6 @@
     VISIT_AND_RETURN(Literal, lit)
   }
 
-  Literal* NewStringListLiteral(ZoneList<const AstRawString*>* strings,
-                                int pos) {
-    Literal* lit = new (zone_)
-        Literal(zone_, ast_value_factory_->NewStringList(strings), pos);
-    VISIT_AND_RETURN(Literal, lit)
-  }
-
   Literal* NewNullLiteral(int pos) {
     Literal* lit =
         new (zone_) Literal(zone_, ast_value_factory_->NewNull(), pos);
diff --git a/src/compiler.cc b/src/compiler.cc
index e894e10..b7e0c98 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -845,14 +845,17 @@
 
 
 MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
-  DCHECK(!function->GetIsolate()->has_pending_exception());
+  Isolate* isolate = function->GetIsolate();
+  DCHECK(!isolate->has_pending_exception());
   DCHECK(!function->is_compiled());
-
-  if (FLAG_turbo_asm && function->shared()->asm_function()) {
+  // If the debugger is active, do not compile with turbofan unless we can
+  // deopt from turbofan code.
+  if (FLAG_turbo_asm && function->shared()->asm_function() &&
+      (FLAG_turbo_deoptimization || !isolate->debug()->is_active())) {
     CompilationInfoWithZone info(function);
 
-    VMState<COMPILER> state(info.isolate());
-    PostponeInterruptsScope postpone(info.isolate());
+    VMState<COMPILER> state(isolate);
+    PostponeInterruptsScope postpone(isolate);
 
     info.SetOptimizing(BailoutId::None(),
                        Handle<Code>(function->shared()->code()));
@@ -861,7 +864,10 @@
     info.MarkAsTypingEnabled();
     info.MarkAsInliningDisabled();
 
-    if (GetOptimizedCodeNow(&info)) return info.code();
+    if (GetOptimizedCodeNow(&info)) {
+      DCHECK(function->shared()->is_compiled());
+      return info.code();
+    }
   }
 
   if (function->shared()->is_compiled()) {
@@ -870,13 +876,12 @@
 
   CompilationInfoWithZone info(function);
   Handle<Code> result;
-  ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result,
-                             GetUnoptimizedCodeCommon(&info), Code);
+  ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCodeCommon(&info),
+                             Code);
 
-  if (FLAG_always_opt &&
-      info.isolate()->use_crankshaft() &&
+  if (FLAG_always_opt && isolate->use_crankshaft() &&
       !info.shared_info()->optimization_disabled() &&
-      !info.isolate()->DebuggerHasBreakPoints()) {
+      !isolate->DebuggerHasBreakPoints()) {
     Handle<Code> opt_code;
     if (Compiler::GetOptimizedCode(
             function, result,
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index c0e5004..814f5e7 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -363,6 +363,18 @@
     case kArmVsqrtF64:
       __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
       break;
+    case kArmVfloorF64:
+      __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVceilF64:
+      __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVroundTruncateF64:
+      __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
+    case kArmVroundTiesAwayF64:
+      __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
+      break;
     case kArmVnegF64:
       __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
       break;
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 3c97482..04559a3 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -44,6 +44,10 @@
   V(ArmVmodF64)                    \
   V(ArmVnegF64)                    \
   V(ArmVsqrtF64)                   \
+  V(ArmVfloorF64)                  \
+  V(ArmVceilF64)                   \
+  V(ArmVroundTruncateF64)          \
+  V(ArmVroundTiesAwayF64)          \
   V(ArmVcvtF32F64)                 \
   V(ArmVcvtF64F32)                 \
   V(ArmVcvtF64S32)                 \
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 56b1bf0..3f3aff4 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -100,6 +100,10 @@
       case kArmVmodF64:
       case kArmVnegF64:
       case kArmVsqrtF64:
+      case kArmVfloorF64:
+      case kArmVceilF64:
+      case kArmVroundTruncateF64:
+      case kArmVroundTiesAwayF64:
       case kArmVcvtF32F64:
       case kArmVcvtF64F32:
       case kArmVcvtF64S32:
@@ -115,6 +119,14 @@
 };
 
 
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                           Node* node) {
+  ArmOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
 static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
                             Node* node) {
   ArmOperandGenerator g(selector);
@@ -826,6 +838,30 @@
 }
 
 
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVfloorF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVceilF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVroundTruncateF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(ARMv8));
+  VisitRRFloat64(this, kArmVroundTiesAwayF64, node);
+}
+
+
 void InstructionSelector::VisitCall(Node* node) {
   ArmOperandGenerator g(this);
   CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
@@ -1139,10 +1175,19 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kInt32DivIsSafe |
-         MachineOperatorBuilder::kInt32ModIsSafe |
-         MachineOperatorBuilder::kUint32DivIsSafe |
-         MachineOperatorBuilder::kUint32ModIsSafe;
+  MachineOperatorBuilder::Flags flags =
+      MachineOperatorBuilder::kInt32DivIsSafe |
+      MachineOperatorBuilder::kInt32ModIsSafe |
+      MachineOperatorBuilder::kUint32DivIsSafe |
+      MachineOperatorBuilder::kUint32ModIsSafe;
+
+  if (CpuFeatures::IsSupported(ARMv8)) {
+    flags |= MachineOperatorBuilder::kFloat64Floor |
+             MachineOperatorBuilder::kFloat64Ceil |
+             MachineOperatorBuilder::kFloat64RoundTruncate |
+             MachineOperatorBuilder::kFloat64RoundTiesAway;
+  }
+  return flags;
 }
 
 }  // namespace compiler
diff --git a/src/compiler/arm/linkage-arm.cc b/src/compiler/arm/linkage-arm.cc
index 6673a47..9a65ccc 100644
--- a/src/compiler/arm/linkage-arm.cc
+++ b/src/compiler/arm/linkage-arm.cc
@@ -49,7 +49,7 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
                                    flags);
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index c3a4f40..38c6531 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -222,6 +222,18 @@
     case kArchTruncateDoubleToI:
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       break;
+    case kArm64Float64Ceil:
+      __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64Floor:
+      __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64RoundTruncate:
+      __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64RoundTiesAway:
+      __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
     case kArm64Add:
       __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
       break;
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
index 63756d1..ab14a7c 100644
--- a/src/compiler/arm64/instruction-codes-arm64.h
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -78,6 +78,10 @@
   V(Arm64Float64Div)               \
   V(Arm64Float64Mod)               \
   V(Arm64Float64Sqrt)              \
+  V(Arm64Float64Floor)             \
+  V(Arm64Float64Ceil)              \
+  V(Arm64Float64RoundTruncate)     \
+  V(Arm64Float64RoundTiesAway)     \
   V(Arm64Float32ToFloat64)         \
   V(Arm64Float64ToFloat32)         \
   V(Arm64Float64ToInt32)           \
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 40cb207..39cd6ca 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -86,6 +86,14 @@
 };
 
 
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                           Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
 static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                      Node* node) {
   Arm64OperandGenerator g(selector);
@@ -899,9 +907,27 @@
 
 
 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRRFloat64(this, kArm64Float64Sqrt, node);
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  VisitRRFloat64(this, kArm64Float64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  VisitRRFloat64(this, kArm64Float64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRRFloat64(this, kArm64Float64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  VisitRRFloat64(this, kArm64Float64RoundTiesAway, node);
 }
 
 
@@ -1317,9 +1343,11 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kNoFlags;
+  return MachineOperatorBuilder::kFloat64Floor |
+         MachineOperatorBuilder::kFloat64Ceil |
+         MachineOperatorBuilder::kFloat64RoundTruncate |
+         MachineOperatorBuilder::kFloat64RoundTiesAway;
 }
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/arm64/linkage-arm64.cc b/src/compiler/arm64/linkage-arm64.cc
index 2be2cb1..c50736c 100644
--- a/src/compiler/arm64/linkage-arm64.cc
+++ b/src/compiler/arm64/linkage-arm64.cc
@@ -49,7 +49,7 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
                                    flags);
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
index e89a09a..3b39980 100644
--- a/src/compiler/change-lowering.cc
+++ b/src/compiler/change-lowering.cc
@@ -46,14 +46,14 @@
   STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
   const int heap_number_value_offset =
       ((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
-  return jsgraph()->Int32Constant(heap_number_value_offset - kHeapObjectTag);
+  return jsgraph()->IntPtrConstant(heap_number_value_offset - kHeapObjectTag);
 }
 
 
 Node* ChangeLowering::SmiMaxValueConstant() {
   const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize()
                                                : SmiTagging<8>::SmiValueSize();
-  return jsgraph()->Int32Constant(
+  return jsgraph()->IntPtrConstant(
       -(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
 }
 
@@ -61,7 +61,7 @@
 Node* ChangeLowering::SmiShiftBitsConstant() {
   const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize()
                                                : SmiTagging<8>::SmiShiftSize();
-  return jsgraph()->Int32Constant(smi_shift_size + kSmiTagSize);
+  return jsgraph()->IntPtrConstant(smi_shift_size + kSmiTagSize);
 }
 
 
@@ -166,7 +166,7 @@
   STATIC_ASSERT(kSmiTagMask == 1);
 
   Node* tag = graph()->NewNode(machine()->WordAnd(), val,
-                               jsgraph()->Int32Constant(kSmiTagMask));
+                               jsgraph()->IntPtrConstant(kSmiTagMask));
   Node* branch = graph()->NewNode(common()->Branch(), tag, control);
 
   Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
@@ -192,7 +192,7 @@
   STATIC_ASSERT(kSmiTagMask == 1);
 
   Node* tag = graph()->NewNode(machine()->WordAnd(), val,
-                               jsgraph()->Int32Constant(kSmiTagMask));
+                               jsgraph()->IntPtrConstant(kSmiTagMask));
   Node* branch = graph()->NewNode(common()->Branch(), tag, control);
 
   Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
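Why IntPtrConstant: these constants feed word-width operations (WordAnd above),
which are 64 bits wide on 64-bit targets, so a 32-bit Int32Constant is the
wrong representation there. A quick worked check of the smi constants involved
(a sketch; the SmiTagging parameters are those from include/v8.h):

    #include <cassert>

    int main() {
      // 32-bit tagging: 31-bit smi payload, shift size 0, shift bits 0 + 1.
      int smi_value_size = 31;
      assert(-(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1) ==
             (1 << 30) - 1);  // max smi on 32-bit targets == 2^30 - 1
      // 64-bit tagging: 32-bit smi payload, shift size 31, shift bits 31 + 1.
      smi_value_size = 32;
      assert(-(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1) ==
             2147483647);  // max smi on 64-bit targets == 2^31 - 1
      return 0;
    }
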
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 92d3b05..0be2626 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -5,10 +5,12 @@
 #ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
 #define V8_COMPILER_CODE_GENERATOR_IMPL_H_
 
+#include "src/code-stubs.h"
 #include "src/compiler/code-generator.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/opcodes.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index 494f91e..2184a84 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -17,6 +17,8 @@
 namespace internal {
 namespace compiler {
 
+class Linkage;
+
 // Generates native code for a sequence of instructions.
 class CodeGenerator FINAL : public GapResolver::Assembler {
  public:
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index 571a7f5..b0af2fd 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -7,6 +7,8 @@
 #include "src/assembler.h"
 #include "src/base/lazy-instance.h"
 #include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
 #include "src/unique.h"
 #include "src/zone.h"
 
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index 67b933f..19f533b 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -7,6 +7,7 @@
 #include <sstream>
 #include <string>
 
+#include "src/code-stubs.h"
 #include "src/compiler/generic-algorithm.h"
 #include "src/compiler/generic-node.h"
 #include "src/compiler/generic-node-inl.h"
@@ -673,19 +674,16 @@
   Tag tag(this, "intervals");
   PrintStringProperty("name", phase);
 
-  const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
-  for (int i = 0; i < fixed_d->length(); ++i) {
-    PrintLiveRange(fixed_d->at(i), "fixed");
+  for (auto range : allocator->fixed_double_live_ranges()) {
+    PrintLiveRange(range, "fixed");
   }
 
-  const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
-  for (int i = 0; i < fixed->length(); ++i) {
-    PrintLiveRange(fixed->at(i), "fixed");
+  for (auto range : allocator->fixed_live_ranges()) {
+    PrintLiveRange(range, "fixed");
   }
 
-  const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
-  for (int i = 0; i < live_ranges->length(); ++i) {
-    PrintLiveRange(live_ranges->at(i), "object");
+  for (auto range : allocator->live_ranges()) {
+    PrintLiveRange(range, "object");
   }
 }
 
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index d7098ae..0044483 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -353,6 +353,24 @@
     case kSSEFloat64Sqrt:
       __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
+    case kSSEFloat64Floor: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundDown);
+      break;
+    }
+    case kSSEFloat64Ceil: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundUp);
+      break;
+    }
+    case kSSEFloat64RoundTruncate: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundToZero);
+      break;
+    }
     case kSSECvtss2sd:
       __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
index 251a489..c922a3d 100644
--- a/src/compiler/ia32/instruction-codes-ia32.h
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -36,6 +36,9 @@
   V(SSEFloat64Div)                 \
   V(SSEFloat64Mod)                 \
   V(SSEFloat64Sqrt)                \
+  V(SSEFloat64Floor)               \
+  V(SSEFloat64Ceil)                \
+  V(SSEFloat64RoundTruncate)       \
   V(SSECvtss2sd)                   \
   V(SSECvtsd2ss)                   \
   V(SSEFloat64ToInt32)             \
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index e1a9cb7..ca33ab0 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -53,6 +53,178 @@
 }
 
 
+// Fairly Intel-specific node matcher used for matching scale factors in
+// addressing modes.
+// Matches nodes of the form [x * N] for N in {1, 2, 4, 8}.
+class ScaleFactorMatcher : public NodeMatcher {
+ public:
+  static const int kMatchedFactors[4];
+
+  explicit ScaleFactorMatcher(Node* node);
+
+  bool Matches() const { return left_ != NULL; }
+  int Power() const {
+    DCHECK(Matches());
+    return power_;
+  }
+  Node* Left() const {
+    DCHECK(Matches());
+    return left_;
+  }
+
+ private:
+  Node* left_;
+  int power_;
+};
+
+
+// Fairly Intel-specific node matcher used for matching index and displacement
+// operands in addressing modes.
+// Matches nodes of the form:
+//  [x * N]
+//  [x * N + K]
+//  [x + K]
+//  [x] -- fallback case
+// for N in {1, 2, 4, 8} and K an int32_t constant.
+class IndexAndDisplacementMatcher : public NodeMatcher {
+ public:
+  explicit IndexAndDisplacementMatcher(Node* node);
+
+  Node* index_node() const { return index_node_; }
+  int displacement() const { return displacement_; }
+  int power() const { return power_; }
+
+ private:
+  Node* index_node_;
+  int displacement_;
+  int power_;
+};
+
+
+// Fairly Intel-specific node matcher used for matching multiplies that can be
+// transformed to lea instructions.
+// Matches nodes of the form:
+//  [x * N]
+// for N in {1, 2, 3, 4, 5, 8, 9}.
+class LeaMultiplyMatcher : public NodeMatcher {
+ public:
+  static const int kMatchedFactors[7];
+
+  explicit LeaMultiplyMatcher(Node* node);
+
+  bool Matches() const { return left_ != NULL; }
+  int Power() const {
+    DCHECK(Matches());
+    return power_;
+  }
+  Node* Left() const {
+    DCHECK(Matches());
+    return left_;
+  }
+  // Displacement will be either 0 or 1.
+  int32_t Displacement() const {
+    DCHECK(Matches());
+    return displacement_;
+  }
+
+ private:
+  Node* left_;
+  int power_;
+  int displacement_;
+};
+
+
+const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};
+
+
+ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
+    : NodeMatcher(node), left_(NULL), power_(0) {
+  if (opcode() != IrOpcode::kInt32Mul) return;
+  // TODO(dcarney): should test 64 bit ints as well.
+  Int32BinopMatcher m(this->node());
+  if (!m.right().HasValue()) return;
+  int32_t value = m.right().Value();
+  switch (value) {
+    case 8:
+      power_++;  // Fall through.
+    case 4:
+      power_++;  // Fall through.
+    case 2:
+      power_++;  // Fall through.
+    case 1:
+      break;
+    default:
+      return;
+  }
+  left_ = m.left().node();
+}
+
+
+IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
+    : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
+  if (opcode() == IrOpcode::kInt32Add) {
+    Int32BinopMatcher m(this->node());
+    if (m.right().HasValue()) {
+      displacement_ = m.right().Value();
+      index_node_ = m.left().node();
+    }
+  }
+  // Test scale factor.
+  ScaleFactorMatcher scale_matcher(index_node_);
+  if (scale_matcher.Matches()) {
+    index_node_ = scale_matcher.Left();
+    power_ = scale_matcher.Power();
+  }
+}
+
+
+const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};
+
+
+LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
+    : NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
+  if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
+    return;
+  }
+  int64_t value;
+  Node* left = NULL;
+  {
+    Int32BinopMatcher m(this->node());
+    if (m.right().HasValue()) {
+      value = m.right().Value();
+      left = m.left().node();
+    } else {
+      Int64BinopMatcher m(this->node());
+      if (m.right().HasValue()) {
+        value = m.right().Value();
+        left = m.left().node();
+      } else {
+        return;
+      }
+    }
+  }
+  switch (value) {
+    case 9:
+    case 8:
+      power_++;  // Fall through.
+    case 5:
+    case 4:
+      power_++;  // Fall through.
+    case 3:
+    case 2:
+      power_++;  // Fall through.
+    case 1:
+      break;
+    default:
+      return;
+  }
+  if (!base::bits::IsPowerOfTwo64(value)) {
+    displacement_ = 1;
+  }
+  left_ = left;
+}
+
+
 class AddressingModeMatcher {
  public:
   AddressingModeMatcher(IA32OperandGenerator* g, Node* base, Node* index)
@@ -142,6 +314,14 @@
 };
 
 
+static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                           Node* node) {
+  IA32OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
@@ -596,6 +776,29 @@
 }
 
 
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
+}
+
+
 void InstructionSelector::VisitCall(Node* node) {
   IA32OperandGenerator g(this);
   CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
@@ -881,9 +1084,13 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kNoFlags;
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    return MachineOperatorBuilder::kFloat64Floor |
+           MachineOperatorBuilder::kFloat64Ceil |
+           MachineOperatorBuilder::kFloat64RoundTruncate;
+  }
+  return MachineOperatorBuilder::Flag::kNoFlags;
 }
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
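
Note on the matchers moved into this file (from the now-deleted node-matchers.cc): the fall-through switches in ScaleFactorMatcher and LeaMultiplyMatcher encode each matched factor as a shift count plus an optional extra add, which is exactly what an x86 lea can express. A standalone sketch (hypothetical names, not part of this change) that checks the decomposition for all seven LEA factors:

#include <cassert>
#include <cstdint>

struct LeaDecomposition {
  int power;      // shift amount for the scaled part
  int extra_add;  // 1 if the index is added once more, else 0
  bool matched;
};

LeaDecomposition DecomposeLeaFactor(int64_t n) {
  switch (n) {
    case 1: return {0, 0, true};
    case 2: return {1, 0, true};
    case 3: return {1, 1, true};  // x*3 == (x << 1) + x
    case 4: return {2, 0, true};
    case 5: return {2, 1, true};  // x*5 == (x << 2) + x
    case 8: return {3, 0, true};
    case 9: return {3, 1, true};  // x*9 == (x << 3) + x
    default: return {0, 0, false};
  }
}

int main() {
  for (int64_t n : {1, 2, 3, 4, 5, 8, 9}) {
    const LeaDecomposition d = DecomposeLeaFactor(n);
    assert(d.matched);
    for (int64_t x = 0; x <= 1000; ++x) {
      assert(n * x == (x << d.power) + (d.extra_add ? x : 0));
    }
  }
  return 0;
}

LeaMultiplyMatcher's "displacement" of 1 plays the role of extra_add here: lea computes base + index*scale, so x*9 can be emitted as lea r, [x + x*8].
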
diff --git a/src/compiler/ia32/linkage-ia32.cc b/src/compiler/ia32/linkage-ia32.cc
index f2c5fab..8d85a9d 100644
--- a/src/compiler/ia32/linkage-ia32.cc
+++ b/src/compiler/ia32/linkage-ia32.cc
@@ -44,7 +44,7 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
                                    flags);
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 53d508a..53e288d 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -9,6 +9,7 @@
 #include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/linkage.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -45,7 +46,8 @@
 
   InstructionOperand* DefineAsConstant(Node* node) {
     selector()->MarkAsDefined(node);
-    int virtual_register = sequence()->AddConstant(node, ToConstant(node));
+    int virtual_register = selector_->GetVirtualRegister(node);
+    sequence()->AddConstant(virtual_register, ToConstant(node));
     return ConstantOperand::Create(virtual_register, zone());
   }
 
@@ -171,8 +173,7 @@
   UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
     DCHECK_NOT_NULL(node);
     DCHECK_NOT_NULL(operand);
-    operand->set_virtual_register(
-        selector_->sequence()->GetVirtualRegister(node));
+    operand->set_virtual_register(selector_->GetVirtualRegister(node));
     selector()->MarkAsDefined(node);
     return operand;
   }
@@ -180,8 +181,7 @@
   UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
     DCHECK_NOT_NULL(node);
     DCHECK_NOT_NULL(operand);
-    operand->set_virtual_register(
-        selector_->sequence()->GetVirtualRegister(node));
+    operand->set_virtual_register(selector_->GetVirtualRegister(node));
     selector()->MarkAsUsed(node);
     return operand;
   }
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 22da0b5..122bb5f 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/instruction-selector.h"
 
+#include "src/compiler/graph.h"
 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
@@ -13,7 +14,8 @@
 namespace internal {
 namespace compiler {
 
-InstructionSelector::InstructionSelector(Zone* local_zone, Linkage* linkage,
+InstructionSelector::InstructionSelector(Zone* local_zone, Graph* graph,
+                                         Linkage* linkage,
                                          InstructionSequence* sequence,
                                          Schedule* schedule,
                                          SourcePositionTable* source_positions,
@@ -24,10 +26,11 @@
       source_positions_(source_positions),
       features_(features),
       schedule_(schedule),
+      node_map_(graph->NodeCount(), kNodeUnmapped, zone()),
       current_block_(NULL),
       instructions_(zone()),
-      defined_(sequence->node_count(), false, zone()),
-      used_(sequence->node_count(), false, zone()) {}
+      defined_(graph->NodeCount(), false, zone()),
+      used_(graph->NodeCount(), false, zone()) {}
 
 
 void InstructionSelector::SelectInstructions() {
@@ -157,6 +160,19 @@
 }
 
 
+int InstructionSelector::GetVirtualRegister(const Node* node) {
+  if (node_map_[node->id()] == kNodeUnmapped) {
+    node_map_[node->id()] = sequence()->NextVirtualRegister();
+  }
+  return node_map_[node->id()];
+}
+
+
+int InstructionSelector::GetMappedVirtualRegister(const Node* node) const {
+  return node_map_[node->id()];
+}
+
+
 bool InstructionSelector::IsDefined(Node* node) const {
   DCHECK_NOT_NULL(node);
   NodeId id = node->id();
@@ -195,27 +211,31 @@
 
 bool InstructionSelector::IsDouble(const Node* node) const {
   DCHECK_NOT_NULL(node);
-  return sequence()->IsDouble(sequence()->GetVirtualRegister(node));
+  int virtual_register = GetMappedVirtualRegister(node);
+  if (virtual_register == kNodeUnmapped) return false;
+  return sequence()->IsDouble(virtual_register);
 }
 
 
 void InstructionSelector::MarkAsDouble(Node* node) {
   DCHECK_NOT_NULL(node);
   DCHECK(!IsReference(node));
-  sequence()->MarkAsDouble(sequence()->GetVirtualRegister(node));
+  sequence()->MarkAsDouble(GetVirtualRegister(node));
 }
 
 
 bool InstructionSelector::IsReference(const Node* node) const {
   DCHECK_NOT_NULL(node);
-  return sequence()->IsReference(sequence()->GetVirtualRegister(node));
+  int virtual_register = GetMappedVirtualRegister(node);
+  if (virtual_register == kNodeUnmapped) return false;
+  return sequence()->IsReference(virtual_register);
 }
 
 
 void InstructionSelector::MarkAsReference(Node* node) {
   DCHECK_NOT_NULL(node);
   DCHECK(!IsDouble(node));
-  sequence()->MarkAsReference(sequence()->GetVirtualRegister(node));
+  sequence()->MarkAsReference(GetVirtualRegister(node));
 }
 
 
@@ -583,6 +603,10 @@
     case IrOpcode::kFloat64Div:
     case IrOpcode::kFloat64Mod:
     case IrOpcode::kFloat64Sqrt:
+    case IrOpcode::kFloat64Floor:
+    case IrOpcode::kFloat64Ceil:
+    case IrOpcode::kFloat64RoundTruncate:
+    case IrOpcode::kFloat64RoundTiesAway:
       return kMachFloat64;
     case IrOpcode::kFloat64Equal:
     case IrOpcode::kFloat64LessThan:
@@ -772,11 +796,20 @@
       return VisitFloat64LessThan(node);
     case IrOpcode::kFloat64LessThanOrEqual:
       return VisitFloat64LessThanOrEqual(node);
+    case IrOpcode::kFloat64Floor:
+      return MarkAsDouble(node), VisitFloat64Floor(node);
+    case IrOpcode::kFloat64Ceil:
+      return MarkAsDouble(node), VisitFloat64Ceil(node);
+    case IrOpcode::kFloat64RoundTruncate:
+      return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
+    case IrOpcode::kFloat64RoundTiesAway:
+      return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
     case IrOpcode::kLoadStackPointer:
       return VisitLoadStackPointer(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
+      break;
   }
 }
 
@@ -892,14 +925,14 @@
 void InstructionSelector::VisitPhi(Node* node) {
   // TODO(bmeurer): Emit a PhiInstruction here.
   PhiInstruction* phi = new (instruction_zone())
-      PhiInstruction(instruction_zone(), sequence()->GetVirtualRegister(node));
+      PhiInstruction(instruction_zone(), GetVirtualRegister(node));
   sequence()->InstructionBlockAt(current_block_->GetRpoNumber())->AddPhi(phi);
   const int input_count = node->op()->InputCount();
   phi->operands().reserve(static_cast<size_t>(input_count));
   for (int i = 0; i < input_count; ++i) {
     Node* const input = node->InputAt(i);
     MarkAsUsed(input);
-    phi->operands().push_back(sequence()->GetVirtualRegister(input));
+    phi->operands().push_back(GetVirtualRegister(input));
   }
 }
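
The selector now owns the node-to-vreg table that previously lived in InstructionSequence. A minimal sketch (simplified types, hypothetical class name) of the lazy-allocation behavior, including the kNodeUnmapped sentinel that lets the const queries in IsDouble/IsReference answer false for nodes that never received a register:

#include <cassert>
#include <vector>

class VregMap {
 public:
  static const int kNodeUnmapped = -1;

  explicit VregMap(size_t node_count) : map_(node_count, -1), next_vreg_(0) {}

  // Allocates a virtual register on first request, like GetVirtualRegister.
  int GetVirtualRegister(int node_id) {
    if (map_[node_id] == kNodeUnmapped) map_[node_id] = next_vreg_++;
    return map_[node_id];
  }

  // Const query used by IsDouble/IsReference: never allocates.
  int GetMappedVirtualRegister(int node_id) const { return map_[node_id]; }

 private:
  std::vector<int> map_;
  int next_vreg_;
};

int main() {
  VregMap map(4);
  assert(map.GetMappedVirtualRegister(2) == VregMap::kNodeUnmapped);
  assert(map.GetVirtualRegister(2) == 0);  // allocated on first use
  assert(map.GetVirtualRegister(2) == 0);  // stable afterwards
  assert(map.GetVirtualRegister(0) == 1);
  return 0;
}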
 
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 5af2c9d..4e916be 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -19,13 +19,19 @@
 // Forward declarations.
 struct CallBuffer;  // TODO(bmeurer): Remove this.
 class FlagsContinuation;
+class Linkage;
+
+typedef IntVector NodeToVregMap;
 
 class InstructionSelector FINAL {
  public:
+  static const int kNodeUnmapped = -1;
+
   // Forward declarations.
   class Features;
 
-  InstructionSelector(Zone* local_zone, Linkage* linkage,
+  // TODO(dcarney): pass in vreg mapping instead of graph.
+  InstructionSelector(Zone* local_zone, Graph* graph, Linkage* linkage,
                       InstructionSequence* sequence, Schedule* schedule,
                       SourcePositionTable* source_positions,
                       Features features = SupportedFeatures());
@@ -109,6 +115,11 @@
   // Checks if {node} is currently live.
   bool IsLive(Node* node) const { return !IsDefined(node) && IsUsed(node); }
 
+  int GetVirtualRegister(const Node* node);
+  // Gets the current mapping if it exists, kNodeUnmapped otherwise.
+  int GetMappedVirtualRegister(const Node* node) const;
+  const NodeToVregMap& GetNodeMapForTesting() const { return node_map_; }
+
  private:
   friend class OperandGenerator;
 
@@ -205,6 +216,7 @@
   SourcePositionTable* const source_positions_;
   Features features_;
   Schedule* const schedule_;
+  NodeToVregMap node_map_;
   BasicBlock* current_block_;
   ZoneDeque<Instruction*> instructions_;
   BoolVector defined_;
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 15a01f2..7ee5a69 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -2,16 +2,20 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/instruction.h"
-
 #include "src/compiler/common-operator.h"
 #include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
+#include "src/compiler/instruction.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+STATIC_ASSERT(kMaxGeneralRegisters >= Register::kNumRegisters);
+STATIC_ASSERT(kMaxDoubleRegisters >= DoubleRegister::kMaxNumRegisters);
+
+
 std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
   switch (op.kind()) {
     case InstructionOperand::INVALID:
@@ -383,10 +387,8 @@
 
 
 InstructionSequence::InstructionSequence(Zone* instruction_zone,
-                                         const Graph* graph,
                                          const Schedule* schedule)
     : zone_(instruction_zone),
-      node_map_(graph->NodeCount(), kNodeUnmapped, zone()),
       instruction_blocks_(static_cast<int>(schedule->rpo_order()->size()), NULL,
                           zone()),
       constants_(ConstantMap::key_compare(),
@@ -402,14 +404,6 @@
 }
 
 
-int InstructionSequence::GetVirtualRegister(const Node* node) {
-  if (node_map_[node->id()] == kNodeUnmapped) {
-    node_map_[node->id()] = NextVirtualRegister();
-  }
-  return node_map_[node->id()];
-}
-
-
 Label* InstructionSequence::GetLabel(BasicBlock::RpoNumber rpo) {
   return GetBlockStart(rpo)->label();
 }
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 3a82c11..75c3e9e 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -16,30 +16,29 @@
 #include "src/compiler/opcodes.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/source-position.h"
-// TODO(titzer): don't include the macro-assembler?
-#include "src/macro-assembler.h"
 #include "src/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-// Forward declarations.
-class Linkage;
-
 // A couple of reserved opcodes are used for internal use.
 const InstructionCode kGapInstruction = -1;
 const InstructionCode kBlockStartInstruction = -2;
 const InstructionCode kSourcePositionInstruction = -3;
 
+// Platform independent maxes.
+static const int kMaxGeneralRegisters = 32;
+static const int kMaxDoubleRegisters = 32;
 
-#define INSTRUCTION_OPERAND_LIST(V)              \
-  V(Constant, CONSTANT, 0)                       \
-  V(Immediate, IMMEDIATE, 0)                     \
-  V(StackSlot, STACK_SLOT, 128)                  \
-  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)     \
-  V(Register, REGISTER, Register::kNumRegisters) \
-  V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters)
+
+#define INSTRUCTION_OPERAND_LIST(V)           \
+  V(Constant, CONSTANT, 0)                    \
+  V(Immediate, IMMEDIATE, 0)                  \
+  V(StackSlot, STACK_SLOT, 128)               \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)  \
+  V(Register, REGISTER, kMaxGeneralRegisters) \
+  V(DoubleRegister, DOUBLE_REGISTER, kMaxDoubleRegisters)
 
 class InstructionOperand : public ZoneObject {
  public:
@@ -676,8 +675,10 @@
   Type type() const { return type_; }
 
   int32_t ToInt32() const {
-    DCHECK_EQ(kInt32, type());
-    return static_cast<int32_t>(value_);
+    DCHECK(type() == kInt32 || type() == kInt64);
+    const int32_t value = static_cast<int32_t>(value_);
+    DCHECK_EQ(value_, static_cast<int64_t>(value));
+    return value;
   }
 
   int64_t ToInt64() const {
@@ -843,22 +844,17 @@
 typedef ZoneDeque<PointerMap*> PointerMapDeque;
 typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
 typedef ZoneVector<InstructionBlock*> InstructionBlocks;
-typedef IntVector NodeToVregMap;
 
 // Represents architecture-specific generated code before, during, and after
 // register allocation.
 // TODO(titzer): s/IsDouble/IsFloat64/
 class InstructionSequence FINAL {
  public:
-  static const int kNodeUnmapped = -1;
-
-  InstructionSequence(Zone* zone, const Graph* graph, const Schedule* schedule);
+  InstructionSequence(Zone* zone, const Schedule* schedule);
 
   int NextVirtualRegister() { return next_virtual_register_++; }
   int VirtualRegisterCount() const { return next_virtual_register_; }
 
-  int node_count() const { return static_cast<int>(node_map_.size()); }
-
   const InstructionBlocks& instruction_blocks() const {
     return instruction_blocks_;
   }
@@ -883,9 +879,6 @@
 
   const InstructionBlock* GetInstructionBlock(int instruction_index) const;
 
-  int GetVirtualRegister(const Node* node);
-  const NodeToVregMap& GetNodeMapForTesting() const { return node_map_; }
-
   bool IsReference(int virtual_register) const;
   bool IsDouble(int virtual_register) const;
 
@@ -920,8 +913,8 @@
   void StartBlock(BasicBlock* block);
   void EndBlock(BasicBlock* block);
 
-  int AddConstant(Node* node, Constant constant) {
-    int virtual_register = GetVirtualRegister(node);
+  int AddConstant(int virtual_register, Constant constant) {
+    DCHECK(virtual_register >= 0 && virtual_register < next_virtual_register_);
     DCHECK(constants_.find(virtual_register) == constants_.end());
     constants_.insert(std::make_pair(virtual_register, constant));
     return virtual_register;
@@ -968,7 +961,6 @@
   typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
 
   Zone* const zone_;
-  NodeToVregMap node_map_;
   InstructionBlocks instruction_blocks_;
   ConstantMap constants_;
   ConstantDeque immediates_;
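
Constant::ToInt32 above now also accepts kInt64 values, guarded by a round-trip DCHECK; the same round-trip test appears in the x64 CanBeImmediate change further down. A standalone sketch of the check (note the int64-to-int32 narrowing is implementation-defined by the standard but wraps on the compilers V8 targets):

#include <cassert>
#include <cstdint>

// True iff a 64-bit constant survives a round trip through int32, which is
// the condition both DCHECKs encode.
bool FitsInInt32(int64_t value) {
  return value == static_cast<int64_t>(static_cast<int32_t>(value));
}

int main() {
  assert(FitsInInt32(2147483647));     // INT32_MAX
  assert(FitsInInt32(-2147483648LL));  // INT32_MIN
  assert(!FitsInInt32(2147483648LL));  // 2^31 is one too large
  assert(!FitsInInt32(-2147483649LL));
  return 0;
}
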
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index b40288d..dbaa293 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -191,6 +191,32 @@
 }
 
 
+// ES6 draft 10-14-14, section 20.2.2.16.
+Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
+  if (!machine()->HasFloat64Floor()) return NoChange();
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.floor(a:number) -> Float64Floor(a)
+    Node* value = graph()->NewNode(machine()->Float64Floor(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+// ES6 draft 10-14-14, section 20.2.2.10.
+Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
+  if (!machine()->HasFloat64Ceil()) return NoChange();
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.ceil(a:number) -> Float64Ceil(a)
+    Node* value = graph()->NewNode(machine()->Float64Ceil(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
 Reduction JSBuiltinReducer::Reduce(Node* node) {
   JSCallReduction r(node);
 
@@ -207,6 +233,10 @@
       return ReplaceWithPureReduction(node, ReduceMathImul(node));
     case kMathFround:
       return ReplaceWithPureReduction(node, ReduceMathFround(node));
+    case kMathFloor:
+      return ReplaceWithPureReduction(node, ReduceMathFloor(node));
+    case kMathCeil:
+      return ReplaceWithPureReduction(node, ReduceMathCeil(node));
     default:
       break;
   }
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index f3b862f..4b3be29 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -35,6 +35,8 @@
   Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);
   Reduction ReduceMathFround(Node* node);
+  Reduction ReduceMathFloor(Node* node);
+  Reduction ReduceMathCeil(Node* node);
 
   JSGraph* jsgraph_;
   SimplifiedOperatorBuilder simplified_;
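
ReduceMathFloor/ReduceMathCeil only fire when the machine advertises the corresponding operator, which on ia32 (above) requires SSE4.1. A standalone sketch (hypothetical simplified types) of that flag gating:

#include <cassert>
#include <cstdint>

typedef uint32_t Flags;
const Flags kNoFlags = 0;
const Flags kFloat64Floor = 1u << 0;
const Flags kFloat64Ceil = 1u << 1;
const Flags kFloat64RoundTruncate = 1u << 2;

struct Machine {
  Flags flags;
  bool HasFloat64Floor() const { return (flags & kFloat64Floor) != 0; }
};

// Mirrors ReduceMathFloor's guard: without the flag the builtin call is
// left alone (NoChange); with it, the call may become one machine op.
bool WouldReduceMathFloor(const Machine& m) { return m.HasFloat64Floor(); }

int main() {
  Machine without_sse41 = {kNoFlags};
  Machine with_sse41 = {kFloat64Floor | kFloat64Ceil | kFloat64RoundTruncate};
  assert(!WouldReduceMathFloor(without_sse41));
  assert(WouldReduceMathFloor(with_sse41));
  return 0;
}
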
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 22e6760..da6d66d 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/code-stubs.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/typer.h"
diff --git a/src/compiler/linkage-impl.h b/src/compiler/linkage-impl.h
index c32c706..807d626 100644
--- a/src/compiler/linkage-impl.h
+++ b/src/compiler/linkage-impl.h
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_LINKAGE_IMPL_H_
 #define V8_COMPILER_LINKAGE_IMPL_H_
 
+#include "src/code-stubs.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
@@ -129,8 +131,8 @@
 
   // TODO(turbofan): cache call descriptors for code stub calls.
   static CallDescriptor* GetStubCallDescriptor(
-      Zone* zone, CallInterfaceDescriptor descriptor, int stack_parameter_count,
-      CallDescriptor::Flags flags) {
+      Zone* zone, const CallInterfaceDescriptor& descriptor,
+      int stack_parameter_count, CallDescriptor::Flags flags) {
     const int register_parameter_count =
         descriptor.GetEnvironmentParameterCount();
     const int js_parameter_count =
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index dcfc78a..b586301 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -2,10 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/linkage.h"
-
 #include "src/code-stubs.h"
 #include "src/compiler.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node.h"
 #include "src/compiler/pipeline.h"
 #include "src/scopes.h"
@@ -102,7 +101,7 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags) const {
   return GetStubCallDescriptor(descriptor, stack_parameter_count, flags, zone_);
 }
@@ -233,7 +232,7 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   UNIMPLEMENTED();
   return NULL;
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
index cc5be9b..d11fa12 100644
--- a/src/compiler/linkage.h
+++ b/src/compiler/linkage.h
@@ -6,15 +6,16 @@
 #define V8_COMPILER_LINKAGE_H_
 
 #include "src/base/flags.h"
-#include "src/code-stubs.h"
 #include "src/compiler/frame.h"
 #include "src/compiler/machine-type.h"
-#include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 #include "src/zone.h"
 
 namespace v8 {
 namespace internal {
+
+class CallInterfaceDescriptor;
+
 namespace compiler {
 
 // Describes the location for a parameter or a return value to a call.
@@ -183,10 +184,10 @@
       Operator::Properties properties, Zone* zone);
 
   CallDescriptor* GetStubCallDescriptor(
-      CallInterfaceDescriptor descriptor, int stack_parameter_count = 0,
+      const CallInterfaceDescriptor& descriptor, int stack_parameter_count = 0,
       CallDescriptor::Flags flags = CallDescriptor::kNoFlags) const;
   static CallDescriptor* GetStubCallDescriptor(
-      CallInterfaceDescriptor descriptor, int stack_parameter_count,
+      const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
       CallDescriptor::Flags flags, Zone* zone);
 
   // Creates a call descriptor for simplified C calls that is appropriate
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 98873c3..3803af5 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -370,15 +370,16 @@
       if (m.left().IsWord32Sar() && m.right().HasValue()) {
         Int32BinopMatcher mleft(m.left().node());
         if (mleft.right().HasValue()) {
-          // (x >> K) < C => x < (C << K) | (2^K - 1)
+          // (x >> K) < C => x < (C << K)
           // when C < (M >> K)
           const uint32_t c = m.right().Value();
           const uint32_t k = mleft.right().Value() & 0x1f;
           if (c < static_cast<uint32_t>(kMaxInt >> k)) {
             node->ReplaceInput(0, mleft.left().node());
-            node->ReplaceInput(1, Uint32Constant((c << k) | ((1 << k) - 1)));
+            node->ReplaceInput(1, Uint32Constant(c << k));
             return Changed(node);
           }
+          // TODO(turbofan): else the comparison is always true.
         }
       }
       break;
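
The corrected comment above drops the "| (2^K - 1)" term: an arithmetic right shift is floor division, so (x >> K) < C holds exactly when x < (C << K), provided C << K cannot overflow, which the "c < (kMaxInt >> k)" guard ensures. A brute-force checker (standalone sketch; assumes >> on negative ints is arithmetic, matching Word32Sar, and a signed compare against a non-negative C):

#include <cassert>
#include <cstdint>

int main() {
  for (int k : {0, 1, 2, 3, 4, 30}) {
    for (int32_t c = 0; c < 40; ++c) {
      if (c >= (INT32_MAX >> k)) continue;  // the reducer's overflow guard
      for (int32_t x = -1000; x <= 1000; ++x) {
        const bool lhs = (x >> k) < c;  // arithmetic shift = floor division
        const bool rhs = x < (c << k);
        assert(lhs == rhs);
      }
    }
  }
  return 0;
}
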
diff --git a/src/compiler/machine-type.h b/src/compiler/machine-type.h
index 4c38a6d..4c51a9f 100644
--- a/src/compiler/machine-type.h
+++ b/src/compiler/machine-type.h
@@ -81,26 +81,34 @@
   return static_cast<MachineType>(result);
 }
 
-// Gets the element size in bytes of the machine type.
-inline int ElementSizeOf(MachineType machine_type) {
+// Gets the log2 of the element size in bytes of the machine type.
+inline int ElementSizeLog2Of(MachineType machine_type) {
   switch (RepresentationOf(machine_type)) {
     case kRepBit:
     case kRepWord8:
-      return 1;
+      return 0;
     case kRepWord16:
-      return 2;
+      return 1;
     case kRepWord32:
     case kRepFloat32:
-      return 4;
+      return 2;
     case kRepWord64:
     case kRepFloat64:
-      return 8;
+      return 3;
     case kRepTagged:
-      return kPointerSize;
+      return kPointerSizeLog2;
     default:
-      UNREACHABLE();
-      return kPointerSize;
+      break;
   }
+  UNREACHABLE();
+  return -1;
+}
+
+// Gets the element size in bytes of the machine type.
+inline int ElementSizeOf(MachineType machine_type) {
+  const int shift = ElementSizeLog2Of(machine_type);
+  DCHECK_NE(-1, shift);
+  return 1 << shift;
 }
 
 // Describes the inputs and outputs of a function or call.
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 9be6bb6..637d98b 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -409,6 +409,22 @@
 }
 
 
+void InstructionSelector::VisitFloat64Floor(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
+}
+
+
 void InstructionSelector::VisitCall(Node* node) {
   MipsOperandGenerator g(this);
   CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
diff --git a/src/compiler/mips/linkage-mips.cc b/src/compiler/mips/linkage-mips.cc
index b9f3fa2..91fe9c3 100644
--- a/src/compiler/mips/linkage-mips.cc
+++ b/src/compiler/mips/linkage-mips.cc
@@ -49,7 +49,7 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
                                    flags);
diff --git a/src/compiler/node-matchers.cc b/src/compiler/node-matchers.cc
deleted file mode 100644
index 4d7fb84..0000000
--- a/src/compiler/node-matchers.cc
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/generic-node-inl.h"
-#include "src/compiler/node-matchers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};
-
-
-ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
-    : NodeMatcher(node), left_(NULL), power_(0) {
-  if (opcode() != IrOpcode::kInt32Mul) return;
-  // TODO(dcarney): should test 64 bit ints as well.
-  Int32BinopMatcher m(this->node());
-  if (!m.right().HasValue()) return;
-  int32_t value = m.right().Value();
-  switch (value) {
-    case 8:
-      power_++;  // Fall through.
-    case 4:
-      power_++;  // Fall through.
-    case 2:
-      power_++;  // Fall through.
-    case 1:
-      break;
-    default:
-      return;
-  }
-  left_ = m.left().node();
-}
-
-
-IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
-    : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
-  if (opcode() == IrOpcode::kInt32Add) {
-    Int32BinopMatcher m(this->node());
-    if (m.right().HasValue()) {
-      displacement_ = m.right().Value();
-      index_node_ = m.left().node();
-    }
-  }
-  // Test scale factor.
-  ScaleFactorMatcher scale_matcher(index_node_);
-  if (scale_matcher.Matches()) {
-    index_node_ = scale_matcher.Left();
-    power_ = scale_matcher.Power();
-  }
-}
-
-
-const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};
-
-
-LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
-    : NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
-  if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
-    return;
-  }
-  int64_t value;
-  Node* left = NULL;
-  {
-    Int32BinopMatcher m(this->node());
-    if (m.right().HasValue()) {
-      value = m.right().Value();
-      left = m.left().node();
-    } else {
-      Int64BinopMatcher m(this->node());
-      if (m.right().HasValue()) {
-        value = m.right().Value();
-        left = m.left().node();
-      } else {
-        return;
-      }
-    }
-  }
-  switch (value) {
-    case 9:
-    case 8:
-      power_++;  // Fall through.
-    case 5:
-    case 4:
-      power_++;  // Fall through.
-    case 3:
-    case 2:
-      power_++;  // Fall through.
-    case 1:
-      break;
-    default:
-      return;
-  }
-  if (!base::bits::IsPowerOfTwo64(value)) {
-    displacement_ = 1;
-  }
-  left_ = left;
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index 8c124ba..a55e7bf 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -150,88 +150,6 @@
 typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
 typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
 
-
-// Fairly intel-specify node matcher used for matching scale factors in
-// addressing modes.
-// Matches nodes of form [x * N] for N in {1,2,4,8}
-class ScaleFactorMatcher : public NodeMatcher {
- public:
-  static const int kMatchedFactors[4];
-
-  explicit ScaleFactorMatcher(Node* node);
-
-  bool Matches() const { return left_ != NULL; }
-  int Power() const {
-    DCHECK(Matches());
-    return power_;
-  }
-  Node* Left() const {
-    DCHECK(Matches());
-    return left_;
-  }
-
- private:
-  Node* left_;
-  int power_;
-};
-
-
-// Fairly intel-specify node matcher used for matching index and displacement
-// operands in addressing modes.
-// Matches nodes of form:
-//  [x * N]
-//  [x * N + K]
-//  [x + K]
-//  [x] -- fallback case
-// for N in {1,2,4,8} and K int32_t
-class IndexAndDisplacementMatcher : public NodeMatcher {
- public:
-  explicit IndexAndDisplacementMatcher(Node* node);
-
-  Node* index_node() const { return index_node_; }
-  int displacement() const { return displacement_; }
-  int power() const { return power_; }
-
- private:
-  Node* index_node_;
-  int displacement_;
-  int power_;
-};
-
-
-// Fairly intel-specify node matcher used for matching multiplies that can be
-// transformed to lea instructions.
-// Matches nodes of form:
-//  [x * N]
-// for N in {1,2,3,4,5,8,9}
-class LeaMultiplyMatcher : public NodeMatcher {
- public:
-  static const int kMatchedFactors[7];
-
-  explicit LeaMultiplyMatcher(Node* node);
-
-  bool Matches() const { return left_ != NULL; }
-  int Power() const {
-    DCHECK(Matches());
-    return power_;
-  }
-  Node* Left() const {
-    DCHECK(Matches());
-    return left_;
-  }
-  // Displacement will be either 0 or 1.
-  int32_t Displacement() const {
-    DCHECK(Matches());
-    return displacement_;
-  }
-
- private:
-  Node* left_;
-  int power_;
-  int displacement_;
-};
-
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index fc0a432..0dcb408 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -538,15 +538,15 @@
                                                        data->schedule());
   }
 
-  InstructionSequence sequence(data->instruction_zone(), data->graph(),
-                               data->schedule());
+  InstructionSequence sequence(data->instruction_zone(), data->schedule());
 
   // Select and schedule instructions covering the scheduled graph.
   {
     PhaseScope phase_scope(data->pipeline_statistics(), "select instructions");
     ZonePool::Scope zone_scope(data->zone_pool());
-    InstructionSelector selector(zone_scope.zone(), linkage, &sequence,
-                                 data->schedule(), data->source_positions());
+    InstructionSelector selector(zone_scope.zone(), data->graph(), linkage,
+                                 &sequence, data->schedule(),
+                                 data->source_positions());
     selector.SelectInstructions();
   }
 
@@ -580,7 +580,9 @@
     debug_name = GetDebugName(info());
 #endif
 
-    RegisterAllocator allocator(zone_scope.zone(), &frame, &sequence,
+
+    RegisterAllocator allocator(RegisterAllocator::PlatformConfig(),
+                                zone_scope.zone(), &frame, &sequence,
                                 debug_name.get());
     if (!allocator.Allocate(data->pipeline_statistics())) {
       info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index ba545c1..1d8b187 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -13,10 +13,11 @@
 
 RawMachineAssembler::RawMachineAssembler(Graph* graph,
                                          MachineSignature* machine_sig,
-                                         MachineType word)
+                                         MachineType word,
+                                         MachineOperatorBuilder::Flags flags)
     : GraphBuilder(graph),
       schedule_(new (zone()) Schedule(zone())),
-      machine_(word),
+      machine_(word, flags),
       common_(zone()),
       machine_sig_(machine_sig),
       call_descriptor_(
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index dfe83fa..01fa509 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -45,7 +45,9 @@
   };
 
   RawMachineAssembler(Graph* graph, MachineSignature* machine_sig,
-                      MachineType word = kMachPtr);
+                      MachineType word = kMachPtr,
+                      MachineOperatorBuilder::Flags flags =
+                          MachineOperatorBuilder::Flag::kNoFlags);
   virtual ~RawMachineAssembler() {}
 
   Isolate* isolate() const { return zone()->isolate(); }
@@ -380,6 +382,14 @@
   Node* TruncateInt64ToInt32(Node* a) {
     return NewNode(machine()->TruncateInt64ToInt32(), a);
   }
+  Node* Float64Floor(Node* a) { return NewNode(machine()->Float64Floor(), a); }
+  Node* Float64Ceil(Node* a) { return NewNode(machine()->Float64Ceil(), a); }
+  Node* Float64RoundTruncate(Node* a) {
+    return NewNode(machine()->Float64RoundTruncate(), a);
+  }
+  Node* Float64RoundTiesAway(Node* a) {
+    return NewNode(machine()->Float64RoundTiesAway(), a);
+  }
 
   // Parameters.
   Node* Parameter(size_t index);
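
RawMachineAssembler gains an optional MachineOperatorBuilder::Flags parameter, defaulted so existing call sites keep compiling while tests can opt into the new rounding operators. A trivial standalone sketch (hypothetical names) of that pattern:

#include <cassert>
#include <cstdint>

typedef uint32_t Flags;
const Flags kNoFlags = 0;
const Flags kFloat64Floor = 1u << 0;

struct AssemblerLike {
  explicit AssemblerLike(Flags flags = kNoFlags) : flags_(flags) {}
  Flags flags_;
};

int main() {
  AssemblerLike old_site;                 // pre-existing call sites compile
  AssemblerLike new_site(kFloat64Floor);  // opts into the new machine ops
  assert(old_site.flags_ == kNoFlags);
  assert(new_site.flags_ == kFloat64Floor);
  return 0;
}
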
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index e65b9ff..ced88a8 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/linkage.h"
 #include "src/compiler/pipeline-statistics.h"
 #include "src/compiler/register-allocator.h"
+#include "src/macro-assembler.h"  // TODO(dcarney): remove this.
 #include "src/string-stream.h"
 
 namespace v8 {
@@ -506,24 +507,47 @@
 }
 
 
-RegisterAllocator::RegisterAllocator(Zone* local_zone, Frame* frame,
-                                     InstructionSequence* code,
+RegisterAllocator::Config RegisterAllocator::PlatformConfig() {
+  DCHECK_EQ(Register::kMaxNumAllocatableRegisters,
+            Register::NumAllocatableRegisters());
+  Config config;
+  config.num_general_registers_ = Register::kMaxNumAllocatableRegisters;
+  config.num_double_registers_ = DoubleRegister::kMaxNumAllocatableRegisters;
+  config.num_aliased_double_registers_ =
+      DoubleRegister::NumAllocatableAliasedRegisters();
+  config.GeneralRegisterName = Register::AllocationIndexToString;
+  config.DoubleRegisterName = DoubleRegister::AllocationIndexToString;
+  return config;
+}
+
+
+RegisterAllocator::RegisterAllocator(const Config& config, Zone* local_zone,
+                                     Frame* frame, InstructionSequence* code,
                                      const char* debug_name)
     : zone_(local_zone),
       frame_(frame),
       code_(code),
       debug_name_(debug_name),
+      config_(config),
       live_in_sets_(code->InstructionBlockCount(), zone()),
       live_ranges_(code->VirtualRegisterCount() * 2, zone()),
-      fixed_live_ranges_(NULL),
-      fixed_double_live_ranges_(NULL),
+      fixed_live_ranges_(this->config().num_general_registers_, NULL, zone()),
+      fixed_double_live_ranges_(this->config().num_double_registers_, NULL,
+                                zone()),
       unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()),
       active_live_ranges_(8, zone()),
       inactive_live_ranges_(8, zone()),
       reusable_slots_(8, zone()),
       mode_(UNALLOCATED_REGISTERS),
       num_registers_(-1),
-      allocation_ok_(true) {}
+      allocation_ok_(true) {
+  DCHECK(this->config().num_general_registers_ <= kMaxGeneralRegisters);
+  DCHECK(this->config().num_double_registers_ <= kMaxDoubleRegisters);
+  // TryAllocateFreeReg and AllocateBlockedReg assume this
+  // when allocating local arrays.
+  DCHECK(this->config().num_double_registers_ >=
+         this->config().num_general_registers_);
+}
 
 
 void RegisterAllocator::InitializeLivenessAnalysis() {
@@ -579,7 +603,7 @@
 
 
 int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
-  return -index - 1 - Register::kMaxNumAllocatableRegisters;
+  return -index - 1 - config().num_general_registers_;
 }
 
 
@@ -611,7 +635,7 @@
 
 
 LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
-  DCHECK(index < Register::kMaxNumAllocatableRegisters);
+  DCHECK(index < config().num_general_registers_);
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
     // TODO(titzer): add a utility method to allocate a new LiveRange:
@@ -629,7 +653,7 @@
 
 
 LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
-  DCHECK(index < DoubleRegister::NumAllocatableAliasedRegisters());
+  DCHECK(index < config().num_aliased_double_registers_);
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
     result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
@@ -1007,7 +1031,7 @@
       }
 
       if (instr->ClobbersRegisters()) {
-        for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
+        for (int i = 0; i < config().num_general_registers_; ++i) {
           if (!IsOutputRegisterOf(instr, i)) {
             LiveRange* range = FixedLiveRangeFor(i);
             range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
@@ -1017,8 +1041,7 @@
       }
 
       if (instr->ClobbersDoubleRegisters()) {
-        for (int i = 0; i < DoubleRegister::NumAllocatableAliasedRegisters();
-             ++i) {
+        for (int i = 0; i < config().num_aliased_double_registers_; ++i) {
           if (!IsOutputDoubleRegisterOf(instr, i)) {
             LiveRange* range = FixedDoubleLiveRangeFor(i);
             range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
@@ -1103,10 +1126,10 @@
 
 
 bool RegisterAllocator::Allocate(PipelineStatistics* stats) {
-  assigned_registers_ = new (code_zone())
-      BitVector(Register::NumAllocatableRegisters(), code_zone());
+  assigned_registers_ =
+      new (code_zone()) BitVector(config().num_general_registers_, code_zone());
   assigned_double_registers_ = new (code_zone())
-      BitVector(DoubleRegister::NumAllocatableAliasedRegisters(), code_zone());
+      BitVector(config().num_aliased_double_registers_, code_zone());
   {
     PhaseScope phase_scope(stats, "meet register constraints");
     MeetRegisterConstraints();
@@ -1235,8 +1258,8 @@
 
 
 void RegisterAllocator::ConnectRanges() {
-  for (int i = 0; i < live_ranges()->length(); ++i) {
-    LiveRange* first_range = live_ranges()->at(i);
+  for (int i = 0; i < live_ranges().length(); ++i) {
+    LiveRange* first_range = live_ranges().at(i);
     if (first_range == NULL || first_range->parent() != NULL) continue;
 
     LiveRange* second_range = first_range->next();
@@ -1437,8 +1460,8 @@
   int last_range_start = 0;
   const PointerMapDeque* pointer_maps = code()->pointer_maps();
   PointerMapDeque::const_iterator first_it = pointer_maps->begin();
-  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
-    LiveRange* range = live_ranges()->at(range_idx);
+  for (int range_idx = 0; range_idx < live_ranges().length(); ++range_idx) {
+    LiveRange* range = live_ranges().at(range_idx);
     if (range == NULL) continue;
     // Iterate over the first parts of multi-part live ranges.
     if (range->parent() != NULL) continue;
@@ -1512,14 +1535,14 @@
 
 
 void RegisterAllocator::AllocateGeneralRegisters() {
-  num_registers_ = Register::NumAllocatableRegisters();
+  num_registers_ = config().num_general_registers_;
   mode_ = GENERAL_REGISTERS;
   AllocateRegisters();
 }
 
 
 void RegisterAllocator::AllocateDoubleRegisters() {
-  num_registers_ = DoubleRegister::NumAllocatableAliasedRegisters();
+  num_registers_ = config().num_aliased_double_registers_;
   mode_ = DOUBLE_REGISTERS;
   AllocateRegisters();
 }
@@ -1543,7 +1566,7 @@
   DCHECK(inactive_live_ranges_.is_empty());
 
   if (mode_ == DOUBLE_REGISTERS) {
-    for (int i = 0; i < DoubleRegister::NumAllocatableAliasedRegisters(); ++i) {
+    for (int i = 0; i < config().num_aliased_double_registers_; ++i) {
       LiveRange* current = fixed_double_live_ranges_.at(i);
       if (current != NULL) {
         AddToInactive(current);
@@ -1551,8 +1574,7 @@
     }
   } else {
     DCHECK(mode_ == GENERAL_REGISTERS);
-    for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
-      LiveRange* current = fixed_live_ranges_.at(i);
+    for (auto current : fixed_live_ranges()) {
       if (current != NULL) {
         AddToInactive(current);
       }
@@ -1636,9 +1658,9 @@
 
 const char* RegisterAllocator::RegisterName(int allocation_index) {
   if (mode_ == GENERAL_REGISTERS) {
-    return Register::AllocationIndexToString(allocation_index);
+    return config().GeneralRegisterName(allocation_index);
   } else {
-    return DoubleRegister::AllocationIndexToString(allocation_index);
+    return config().DoubleRegisterName(allocation_index);
   }
 }
 
@@ -1782,14 +1804,8 @@
 }
 
 
-// TryAllocateFreeReg and AllocateBlockedReg assume this
-// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
-              Register::kMaxNumAllocatableRegisters);
-
-
 bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
-  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition free_until_pos[kMaxDoubleRegisters];
 
   for (int i = 0; i < num_registers_; i++) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
@@ -1872,9 +1888,8 @@
     return;
   }
 
-
-  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
-  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition use_pos[kMaxGeneralRegisters];
+  LifetimePosition block_pos[kMaxDoubleRegisters];
 
   for (int i = 0; i < num_registers_; i++) {
     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
@@ -2187,8 +2202,7 @@
 
 
 void RegisterAllocator::Verify() const {
-  for (int i = 0; i < live_ranges()->length(); ++i) {
-    LiveRange* current = live_ranges()->at(i);
+  for (auto current : live_ranges()) {
     if (current != NULL) current->Verify();
   }
 }
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index 55abd5f..40fafb1 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -5,10 +5,8 @@
 #ifndef V8_REGISTER_ALLOCATOR_H_
 #define V8_REGISTER_ALLOCATOR_H_
 
-#include "src/allocation.h"
 #include "src/compiler/instruction.h"
-#include "src/macro-assembler.h"
-#include "src/zone.h"
+#include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -321,8 +319,19 @@
 
 class RegisterAllocator BASE_EMBEDDED {
  public:
-  explicit RegisterAllocator(Zone* local_zone, Frame* frame,
-                             InstructionSequence* code,
+  class Config {
+   public:
+    int num_general_registers_;
+    int num_double_registers_;
+    int num_aliased_double_registers_;
+    const char* (*GeneralRegisterName)(int allocation_index);
+    const char* (*DoubleRegisterName)(int allocation_index);
+  };
+
+  static Config PlatformConfig();
+
+  explicit RegisterAllocator(const Config& config, Zone* local_zone,
+                             Frame* frame, InstructionSequence* code,
                              const char* debug_name = nullptr);
 
   bool Allocate(PipelineStatistics* stats = NULL);
@@ -330,12 +339,12 @@
   BitVector* assigned_registers() { return assigned_registers_; }
   BitVector* assigned_double_registers() { return assigned_double_registers_; }
 
-  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
-  const Vector<LiveRange*>* fixed_live_ranges() const {
-    return &fixed_live_ranges_;
+  const ZoneList<LiveRange*>& live_ranges() const { return live_ranges_; }
+  const ZoneVector<LiveRange*>& fixed_live_ranges() const {
+    return fixed_live_ranges_;
   }
-  const Vector<LiveRange*>* fixed_double_live_ranges() const {
-    return &fixed_double_live_ranges_;
+  const ZoneVector<LiveRange*>& fixed_double_live_ranges() const {
+    return fixed_double_live_ranges_;
   }
   InstructionSequence* code() const { return code_; }
 
@@ -481,7 +490,7 @@
   // Helper methods for the fixed registers.
   int RegisterCount() const;
   static int FixedLiveRangeID(int index) { return -index - 1; }
-  static int FixedDoubleLiveRangeID(int index);
+  int FixedDoubleLiveRangeID(int index);
   LiveRange* FixedLiveRangeFor(int index);
   LiveRange* FixedDoubleLiveRangeFor(int index);
   LiveRange* LiveRangeFor(int index);
@@ -493,12 +502,15 @@
 
   Frame* frame() const { return frame_; }
   const char* debug_name() const { return debug_name_; }
+  const Config& config() const { return config_; }
 
   Zone* const zone_;
   Frame* const frame_;
   InstructionSequence* const code_;
   const char* const debug_name_;
 
+  const Config config_;
+
   // During liveness analysis keep a mapping from block id to live_in sets
   // for blocks already analyzed.
   ZoneList<BitVector*> live_in_sets_;
@@ -507,10 +519,8 @@
   ZoneList<LiveRange*> live_ranges_;
 
   // Lists of live ranges
-  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
-      fixed_live_ranges_;
-  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
-      fixed_double_live_ranges_;
+  ZoneVector<LiveRange*> fixed_live_ranges_;
+  ZoneVector<LiveRange*> fixed_double_live_ranges_;
   ZoneList<LiveRange*> unhandled_live_ranges_;
   ZoneList<LiveRange*> active_live_ranges_;
   ZoneList<LiveRange*> inactive_live_ranges_;
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index cd11f1a..ff48d34 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -510,6 +510,10 @@
     return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
   }
 
+  bool CanObserveNaN(MachineTypeUnion use) {
+    return (use & (kTypeNumber | kTypeAny)) != 0;
+  }
+
   bool CanObserveNonUint32(MachineTypeUnion use) {
     return (use & (kTypeInt32 | kTypeNumber | kTypeAny)) != 0;
   }
@@ -707,7 +711,7 @@
           if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
           break;
         }
-        if (CanLowerToUint32Binop(node, use)) {
+        if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
           // => unsigned Uint32Mod
           VisitUint32Binop(node);
           if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
@@ -1118,7 +1122,7 @@
 void SimplifiedLowering::DoLoadField(Node* node) {
   const FieldAccess& access = FieldAccessOf(node->op());
   node->set_op(machine()->Load(access.machine_type));
-  Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
+  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
   node->InsertInput(graph()->zone(), 1, offset);
 }
 
@@ -1129,7 +1133,7 @@
       access.base_is_tagged, access.machine_type, access.type);
   node->set_op(
       machine()->Store(StoreRepresentation(access.machine_type, kind)));
-  Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
+  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
   node->InsertInput(graph()->zone(), 1, offset);
 }
 
@@ -1137,20 +1141,22 @@
 Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
                                        Node* const key) {
   Node* index = key;
-  const int element_size = ElementSizeOf(access.machine_type);
-  if (element_size != 1) {
-    index = graph()->NewNode(machine()->Int32Mul(), index,
-                             jsgraph()->Int32Constant(element_size));
+  const int element_size_shift = ElementSizeLog2Of(access.machine_type);
+  if (element_size_shift) {
+    index = graph()->NewNode(machine()->Word32Shl(), index,
+                             jsgraph()->Int32Constant(element_size_shift));
   }
   const int fixed_offset = access.header_size - access.tag();
-  if (fixed_offset != 0) {
+  if (fixed_offset) {
     index = graph()->NewNode(machine()->Int32Add(), index,
                              jsgraph()->Int32Constant(fixed_offset));
   }
-  // TODO(bmeurer): 64-Bit
-  // if (machine()->Is64()) {
-  //   index = graph()->NewNode(machine()->ChangeInt32ToInt64(), index);
-  // }
+  if (machine()->Is64()) {
+    // TODO(turbofan): This is probably only correct for typed arrays, and only
+    // if the typed arrays are at most 2GiB in size, which happens to match
+    // exactly our current situation.
+    index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
+  }
   return index;
 }
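
ComputeIndex above now scales the key with a shift, adds the header-minus-tag offset, and zero-extends to 64 bits on 64-bit targets. A worked standalone sketch of the arithmetic (the header size and tag values are assumptions for illustration only):

#include <cassert>
#include <cstdint>

// byte offset = (key << ElementSizeLog2Of(type)) + header_size - tag,
// then ChangeUint32ToUint64 on 64-bit targets.
uint64_t ComputeElementOffset(uint32_t key, int element_size_log2,
                              int header_size, int tag) {
  uint32_t index = key << element_size_log2;  // scale by the element size
  index += static_cast<uint32_t>(header_size - tag);
  return static_cast<uint64_t>(index);  // zero-extension, not sign-extension
}

int main() {
  // Illustrative numbers only: a kRepFloat64 element (log2 size 3) behind a
  // 16-byte header with tag 1 puts element 2 at 2*8 + 16 - 1 = 31 bytes.
  assert(ComputeElementOffset(2, 3, 16, 1) == 31);
  return 0;
}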
 
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index de97103..3e54859 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -34,12 +34,7 @@
   Operand OutputOperand() { return ToOperand(instr_->Output()); }
 
   Immediate ToImmediate(InstructionOperand* operand) {
-    Constant constant = ToConstant(operand);
-    if (constant.type() == Constant::kInt32) {
-      return Immediate(constant.ToInt32());
-    }
-    UNREACHABLE();
-    return Immediate(-1);
+    return Immediate(ToConstant(operand).ToInt32());
   }
 
   Operand ToOperand(InstructionOperand* op, int extra = 0) {
@@ -407,6 +402,24 @@
         __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
       }
       break;
+    case kSSEFloat64Floor: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundDown);
+      break;
+    }
+    case kSSEFloat64Ceil: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundUp);
+      break;
+    }
+    case kSSEFloat64RoundTruncate: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                 v8::internal::Assembler::kRoundToZero);
+      break;
+    }
     case kSSECvtss2sd:
       if (instr->InputAt(0)->IsDoubleRegister()) {
         __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
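
The three new kSSEFloat64* cases above map roundsd's round-down, round-up, and round-toward-zero modes onto the usual floor/ceil/trunc semantics. A standalone check of that correspondence:

#include <cassert>
#include <cmath>

int main() {
  assert(std::floor(-1.5) == -2.0);  // kSSEFloat64Floor (kRoundDown)
  assert(std::ceil(-1.5) == -1.0);   // kSSEFloat64Ceil (kRoundUp)
  assert(std::trunc(-1.5) == -1.0);  // kSSEFloat64RoundTruncate (kRoundToZero)
  return 0;
}
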
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 835baf5..807fe14 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -52,6 +52,9 @@
   V(SSEFloat64Div)                 \
   V(SSEFloat64Mod)                 \
   V(SSEFloat64Sqrt)                \
+  V(SSEFloat64Floor)               \
+  V(SSEFloat64Ceil)                \
+  V(SSEFloat64RoundTruncate)       \
   V(SSECvtss2sd)                   \
   V(SSECvtsd2ss)                   \
   V(SSEFloat64ToInt32)             \
@@ -85,7 +88,7 @@
 // M = memory operand
 // R = base register
 // N = index register * N for N in {1, 2, 4, 8}
-// I = immediate displacement (int32_t)
+// I = immediate displacement (32-bit signed integer)
 
 #define TARGET_ADDRESSING_MODE_LIST(V) \
   V(MR)   /* [%r1            ] */      \
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 41674a3..17c63d2 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -25,6 +25,10 @@
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
         return true;
+      case IrOpcode::kInt64Constant: {
+        const int64_t value = OpParameter<int64_t>(node);
+        return value == static_cast<int64_t>(static_cast<int32_t>(value));
+      }
       default:
         return false;
     }
@@ -36,98 +40,14 @@
 };
 
 
-// Get the AddressingMode of scale factor N from the AddressingMode of scale
-// factor 1.
-static AddressingMode AdjustAddressingMode(AddressingMode base_mode,
-                                           int power) {
-  DCHECK(0 <= power && power < 4);
-  return static_cast<AddressingMode>(static_cast<int>(base_mode) + power);
-}
-
-
-class AddressingModeMatcher {
- public:
-  AddressingModeMatcher(X64OperandGenerator* g, Node* base, Node* index)
-      : base_operand_(NULL),
-        index_operand_(NULL),
-        displacement_operand_(NULL),
-        mode_(kMode_None) {
-    Int32Matcher index_imm(index);
-    if (index_imm.HasValue()) {
-      int32_t value = index_imm.Value();
-      if (value == 0) {
-        mode_ = kMode_MR;
-      } else {
-        mode_ = kMode_MRI;
-        index_operand_ = g->UseImmediate(index);
-      }
-      base_operand_ = g->UseRegister(base);
-    } else {
-      // Compute base operand.
-      Int64Matcher base_imm(base);
-      if (!base_imm.HasValue() || base_imm.Value() != 0) {
-        base_operand_ = g->UseRegister(base);
-      }
-      // Compute index and displacement.
-      IndexAndDisplacementMatcher matcher(index);
-      index_operand_ = g->UseRegister(matcher.index_node());
-      if (matcher.displacement() != 0) {
-        displacement_operand_ = g->TempImmediate(matcher.displacement());
-      }
-      // Compute mode with scale factor one.
-      if (base_operand_ == NULL) {
-        if (displacement_operand_ == NULL) {
-          mode_ = kMode_M1;
-        } else {
-          mode_ = kMode_M1I;
-        }
-      } else {
-        if (displacement_operand_ == NULL) {
-          mode_ = kMode_MR1;
-        } else {
-          mode_ = kMode_MR1I;
-        }
-      }
-      // Adjust mode to actual scale factor.
-      mode_ = AdjustAddressingMode(mode_, matcher.power());
-    }
-    DCHECK_NE(kMode_None, mode_);
-  }
-
-  size_t SetInputs(InstructionOperand** inputs) {
-    size_t input_count = 0;
-    // Compute inputs_ and input_count.
-    if (base_operand_ != NULL) {
-      inputs[input_count++] = base_operand_;
-    }
-    if (index_operand_ != NULL) {
-      inputs[input_count++] = index_operand_;
-    }
-    if (displacement_operand_ != NULL) {
-      // Pure displacement mode not supported by x64.
-      DCHECK_NE(static_cast<int>(input_count), 0);
-      inputs[input_count++] = displacement_operand_;
-    }
-    DCHECK_NE(static_cast<int>(input_count), 0);
-    return input_count;
-  }
-
-  static const int kMaxInputCount = 3;
-  InstructionOperand* base_operand_;
-  InstructionOperand* index_operand_;
-  InstructionOperand* displacement_operand_;
-  AddressingMode mode_;
-};
-
-
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
+  X64OperandGenerator g(this);
+  Node* const base = node->InputAt(0);
+  Node* const index = node->InputAt(1);
 
   ArchOpcode opcode;
-  // TODO(titzer): signed/unsigned small loads
   switch (rep) {
     case kRepFloat32:
       opcode = kX64Movss;
@@ -153,14 +73,19 @@
       UNREACHABLE();
       return;
   }
-
-  X64OperandGenerator g(this);
-  AddressingModeMatcher matcher(&g, base, index);
-  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
-  size_t input_count = matcher.SetInputs(inputs);
-  Emit(code, 1, outputs, input_count, inputs);
+  if (g.CanBeImmediate(base)) {
+    // load [#base + %index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
+  } else if (g.CanBeImmediate(index)) {
+    // load [%base + #index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    // load [%base + %index*1]
+    Emit(opcode | AddressingModeField::encode(kMode_MR1),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
 }
 
 
@@ -210,20 +135,21 @@
       UNREACHABLE();
       return;
   }
-
-  InstructionOperand* val;
-  if (g.CanBeImmediate(value)) {
-    val = g.UseImmediate(value);
+  InstructionOperand* value_operand =
+      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+  if (g.CanBeImmediate(base)) {
+    // store [#base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+         g.UseRegister(index), g.UseImmediate(base), value_operand);
+  } else if (g.CanBeImmediate(index)) {
+    // store [%base + #index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+         g.UseRegister(base), g.UseImmediate(index), value_operand);
   } else {
-    val = g.UseRegister(value);
+    // store [%base + %index*1], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
+         g.UseRegister(base), g.UseRegister(index), value_operand);
   }
-
-  AddressingModeMatcher matcher(&g, base, index);
-  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
-  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
-  size_t input_count = matcher.SetInputs(inputs);
-  inputs[input_count++] = val;
-  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
 }
 
 
@@ -334,19 +260,21 @@
 }
 
 
+namespace {
+
 // Shared routine for multiple 32-bit shift operations.
 // TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
-static void VisitWord32Shift(InstructionSelector* selector, Node* node,
-                             ArchOpcode opcode) {
+void VisitWord32Shift(InstructionSelector* selector, Node* node,
+                      ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
 
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int32BinopMatcher m(node);
     if (m.right().IsWord32And()) {
       Int32BinopMatcher mright(right);
       if (mright.right().Is(0x1F)) {
@@ -361,17 +289,17 @@
 
 // Shared routine for multiple 64-bit shift operations.
 // TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
-static void VisitWord64Shift(InstructionSelector* selector, Node* node,
-                             ArchOpcode opcode) {
+void VisitWord64Shift(InstructionSelector* selector, Node* node,
+                      ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
+  Int64BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
 
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int64BinopMatcher m(node);
     if (m.right().IsWord64And()) {
       Int64BinopMatcher mright(right);
       if (mright.right().Is(0x3F)) {
@@ -383,6 +311,8 @@
   }
 }
 
+}  // namespace
+
 
 void InstructionSelector::VisitWord32Shl(Node* node) {
   VisitWord32Shift(this, node, kX64Shl32);
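The mright.right().Is(0x1F) / Is(0x3F) special cases above lean on x64 shift semantics: the hardware masks the shift count to 5 bits for 32-bit shifts (6 bits for 64-bit shifts), so an explicit Word32And with 0x1F on the count is redundant and the selector can shift by the unmasked value directly. A quick standalone check of the invariant:

    #include <cassert>
    #include <cstdint>

    // What a 32-bit x64 shl computes for an arbitrary count y.
    static uint32_t shl_with_masked_count(uint32_t x, uint32_t y) {
      return x << (y & 0x1F);
    }

    int main() {
      for (uint32_t y = 0; y < 32; ++y) {
        // For in-range counts the mask is invisible, so dropping it is safe.
        assert(shl_with_masked_count(0x12345678u, y) == (0x12345678u << y));
      }
      return 0;
    }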
@@ -424,57 +354,12 @@
 }
 
 
-static bool TryEmitLeaMultAdd(InstructionSelector* selector, Node* node,
-                              ArchOpcode opcode) {
-  int32_t displacement_value;
-  Node* left;
-  {
-    Int32BinopMatcher m32(node);
-    left = m32.left().node();
-    if (m32.right().HasValue()) {
-      displacement_value = m32.right().Value();
-    } else {
-      Int64BinopMatcher m64(node);
-      if (!m64.right().HasValue()) {
-        return false;
-      }
-      int64_t value_64 = m64.right().Value();
-      displacement_value = static_cast<int32_t>(value_64);
-      if (displacement_value != value_64) return false;
-    }
-  }
-  LeaMultiplyMatcher lmm(left);
-  if (!lmm.Matches()) return false;
-  AddressingMode mode;
-  size_t input_count;
-  X64OperandGenerator g(selector);
-  InstructionOperand* index = g.UseRegister(lmm.Left());
-  InstructionOperand* displacement = g.TempImmediate(displacement_value);
-  InstructionOperand* inputs[] = {index, displacement, displacement};
-  if (lmm.Displacement() != 0) {
-    input_count = 3;
-    inputs[1] = index;
-    mode = kMode_MR1I;
-  } else {
-    input_count = 2;
-    mode = kMode_M1I;
-  }
-  mode = AdjustAddressingMode(mode, lmm.Power());
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
-                 input_count, inputs);
-  return true;
-}
-
-
 void InstructionSelector::VisitInt32Add(Node* node) {
-  if (TryEmitLeaMultAdd(this, node, kX64Lea32)) return;
   VisitBinop(this, node, kX64Add32);
 }
 
 
 void InstructionSelector::VisitInt64Add(Node* node) {
-  if (TryEmitLeaMultAdd(this, node, kX64Lea)) return;
   VisitBinop(this, node, kX64Add);
 }
 
@@ -501,33 +386,9 @@
 }
 
 
-static bool TryEmitLeaMult(InstructionSelector* selector, Node* node,
-                           ArchOpcode opcode) {
-  LeaMultiplyMatcher lea(node);
-  // Try to match lea.
-  if (!lea.Matches()) return false;
-  AddressingMode mode;
-  size_t input_count;
-  X64OperandGenerator g(selector);
-  InstructionOperand* left = g.UseRegister(lea.Left());
-  InstructionOperand* inputs[] = {left, left};
-  if (lea.Displacement() != 0) {
-    input_count = 2;
-    mode = kMode_MR1;
-  } else {
-    input_count = 1;
-    mode = kMode_M1;
-  }
-  mode = AdjustAddressingMode(mode, lea.Power());
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
-                 input_count, inputs);
-  return true;
-}
+namespace {
 
-
-static void VisitMul(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode) {
+void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   X64OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
@@ -544,15 +405,15 @@
   }
 }
 
+}  // namespace
+
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
-  if (TryEmitLeaMult(this, node, kX64Lea32)) return;
   VisitMul(this, node, kX64Imul32);
 }
 
 
 void InstructionSelector::VisitInt64Mul(Node* node) {
-  if (TryEmitLeaMult(this, node, kX64Lea)) return;
   VisitMul(this, node, kX64Imul);
 }
 
@@ -664,7 +525,37 @@
 
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  switch (value->opcode()) {
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kWord32Equal:
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+    case IrOpcode::kUint32Mod: {
+      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so the
+      // zero-extension is a no-op.
+      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      return;
+    }
+    default:
+      break;
+  }
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
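The kArchNop case above works because every 32-bit x64 instruction zeroes bits 63:32 of its destination register, making the explicit zero-extension redundant. A portable sketch of the property being relied on (illustrative only):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t a = 0xFFFFFFFFu;
      uint32_t sum32 = a + 1u;  // the 32-bit add wraps to 0
      // Widening the 32-bit result puts zeros in the upper half, which is
      // exactly what the skipped movl would have produced.
      assert(static_cast<uint64_t>(sum32) == 0u);
      return 0;
    }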
 
 
@@ -676,7 +567,24 @@
 
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  if (CanCover(node, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord64Sar:
+      case IrOpcode::kWord64Shr: {
+        Int64BinopMatcher m(value);
+        if (m.right().Is(32)) {
+          Emit(kX64Shr, g.DefineSameAsFirst(node),
+               g.UseRegister(m.left().node()), g.TempImmediate(32));
+          return;
+        }
+        break;
+      }
+      default:
+        break;
+    }
+  }
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
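The new cases above are a peephole: truncating a 64-bit value that was shifted right by 32 is just reading its upper half, so a single shift producing a 32-bit result suffices. Standalone sketch of the identity:

    #include <cassert>
    #include <cstdint>

    // TruncateInt64ToInt32(Word64Shr(x, 32)) == upper 32 bits of x.
    static int32_t UpperHalf(int64_t x) {
      return static_cast<int32_t>(static_cast<uint64_t>(x) >> 32);
    }

    int main() {
      int64_t x = (int64_t{0x12345678} << 32) | 0x09ABCDEF;
      assert(UpperHalf(x) == 0x12345678);
      return 0;
    }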
 
 
@@ -723,6 +631,41 @@
 }
 
 
+namespace {
+
+void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  X64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+}  // namespace
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  DCHECK(CpuFeatures::IsSupported(SSE4_1));
+  VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
+}
+
+
 void InstructionSelector::VisitCall(Node* node) {
   X64OperandGenerator g(this);
   CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
@@ -1112,9 +1055,13 @@
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    return MachineOperatorBuilder::kFloat64Floor |
+           MachineOperatorBuilder::kFloat64Ceil |
+           MachineOperatorBuilder::kFloat64RoundTruncate;
+  }
   return MachineOperatorBuilder::kNoFlags;
 }
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
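The contract introduced above: the selector advertises per-target capabilities, and shared lowering code tests them before emitting the corresponding operator. A minimal sketch of that shape; the flag values and HasSSE41 below are stand-ins, not the real API:

    #include <cstdio>

    enum Flags {
      kNoFlags = 0,
      kFloat64Floor = 1 << 0,
      kFloat64Ceil = 1 << 1,
      kFloat64RoundTruncate = 1 << 2,
    };

    static bool HasSSE41() { return true; }  // stand-in for the CpuFeatures check

    static int SupportedFlags() {
      if (HasSSE41()) return kFloat64Floor | kFloat64Ceil | kFloat64RoundTruncate;
      return kNoFlags;
    }

    int main() {
      // A lowering pass would branch like this before using Float64Floor.
      if (SupportedFlags() & kFloat64Floor) std::printf("Float64Floor available\n");
      return 0;
    }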
diff --git a/src/compiler/x64/linkage-x64.cc b/src/compiler/x64/linkage-x64.cc
index 8175bc6..0638591 100644
--- a/src/compiler/x64/linkage-x64.cc
+++ b/src/compiler/x64/linkage-x64.cc
@@ -63,7 +63,7 @@
 
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
-    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
     CallDescriptor::Flags flags, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
                                    flags);
diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc
index d41d53b..b1ebc3f 100644
--- a/src/interface-descriptors.cc
+++ b/src/interface-descriptors.cc
@@ -42,7 +42,7 @@
 }
 
 
-const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) {
+const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
   CallInterfaceDescriptorData* start = isolate->call_descriptor_data(0);
   size_t index = data_ - start;
   DCHECK(index < CallDescriptors::NUMBER_OF_DESCRIPTORS);
diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h
index c1f374c..3e4f3e7 100644
--- a/src/interface-descriptors.h
+++ b/src/interface-descriptors.h
@@ -163,7 +163,7 @@
 
   static const Register ContextRegister();
 
-  const char* DebugName(Isolate* isolate);
+  const char* DebugName(Isolate* isolate) const;
 
  protected:
   const CallInterfaceDescriptorData* data() const { return data_; }
diff --git a/src/isolate.cc b/src/isolate.cc
index 59fa038..a49eee6 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1047,6 +1047,40 @@
 }
 
 
+void Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
+                                            Handle<Object> exception) {
+  *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
+
+  if (!exception->IsJSObject()) return;
+  Handle<Name> key = factory()->stack_trace_symbol();
+  Handle<Object> property =
+      JSObject::GetDataProperty(Handle<JSObject>::cast(exception), key);
+  if (!property->IsJSArray()) return;
+  Handle<JSArray> simple_stack_trace = Handle<JSArray>::cast(property);
+
+  Handle<FixedArray> elements(FixedArray::cast(simple_stack_trace->elements()));
+  int elements_limit = Smi::cast(simple_stack_trace->length())->value();
+
+  for (int i = 1; i < elements_limit; i += 4) {
+    Handle<JSFunction> fun =
+        handle(JSFunction::cast(elements->get(i + 1)), this);
+    if (fun->IsFromNativeScript()) continue;
+    Handle<Code> code = handle(Code::cast(elements->get(i + 2)), this);
+    Handle<Smi> offset = handle(Smi::cast(elements->get(i + 3)), this);
+    Address pc = code->address() + offset->value();
+
+    Object* script = fun->shared()->script();
+    if (script->IsScript() &&
+        !(Script::cast(script)->source()->IsUndefined())) {
+      int pos = code->SourcePosition(pc);
+      Handle<Script> casted_script(Script::cast(script));
+      *target = MessageLocation(casted_script, pos, pos + 1);
+      break;
+    }
+  }
+}
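The stride-4 loop above implies a layout for the simple stack trace FixedArray: a leading header slot, then one four-slot record per frame where, by the indices accessed, slot i is the receiver and slots i+1..i+3 hold function, code, and pc offset (layout inferred from the accesses; treat it as an assumption). A toy walk over that shape:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // [header, receiver, function, code, offset, receiver, function, ...]
      std::vector<const char*> elements = {
          "header",
          "receiver0", "function0", "code0", "offset0",
          "receiver1", "function1", "code1", "offset1",
      };
      for (std::size_t i = 1; i + 3 < elements.size(); i += 4) {
        std::printf("frame: fun=%s code=%s offset=%s\n",
                    elements[i + 1], elements[i + 2], elements[i + 3]);
      }
      return 0;
    }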
+
+
 bool Isolate::ShouldReportException(bool* can_be_caught_externally,
                                     bool catchable_by_javascript) {
   // Find the top-most try-catch handler.
@@ -1106,6 +1140,7 @@
 Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
                                                MessageLocation* location) {
   Handle<JSArray> stack_trace_object;
+  MessageLocation potential_computed_location;
   if (capture_stack_trace_for_uncaught_exceptions_) {
     if (IsErrorObject(exception)) {
       // We fetch the stack trace that corresponds to this error object.
@@ -1114,14 +1149,22 @@
       // at this throw site.
       stack_trace_object =
           GetDetailedStackTrace(Handle<JSObject>::cast(exception));
+      if (!location) {
+        ComputeLocationFromStackTrace(&potential_computed_location, exception);
+        location = &potential_computed_location;
+      }
     }
     if (stack_trace_object.is_null()) {
-      // Not an error object, we capture at throw site.
+      // Not an error object, we capture stack and location at throw site.
       stack_trace_object = CaptureCurrentStackTrace(
           stack_trace_for_uncaught_exceptions_frame_limit_,
           stack_trace_for_uncaught_exceptions_options_);
     }
   }
+  if (!location) {
+    ComputeLocation(&potential_computed_location);
+    location = &potential_computed_location;
+  }
 
   // If the exception argument is a custom object, turn it into a string
   // before throwing as uncaught exception.  Note that the pending
@@ -1227,11 +1270,9 @@
       Handle<Object> message_obj = CreateMessage(exception_handle, location);
 
       thread_local_top()->pending_message_obj_ = *message_obj;
-      if (location != NULL) {
-        thread_local_top()->pending_message_script_ = *location->script();
-        thread_local_top()->pending_message_start_pos_ = location->start_pos();
-        thread_local_top()->pending_message_end_pos_ = location->end_pos();
-      }
+      thread_local_top()->pending_message_script_ = *location->script();
+      thread_local_top()->pending_message_start_pos_ = location->start_pos();
+      thread_local_top()->pending_message_end_pos_ = location->end_pos();
 
       // If the abort-on-uncaught-exception flag is specified, abort on any
       // exception not caught by JavaScript, even when an external handler is
@@ -1334,7 +1375,6 @@
 
   if (thread_local_top_.pending_exception_ != heap()->termination_exception() &&
       thread_local_top_.has_pending_message_ &&
-      !thread_local_top_.pending_message_obj_->IsTheHole() &&
       !thread_local_top_.pending_message_obj_->IsTheHole()) {
     Handle<Script> script(
         Script::cast(thread_local_top_.pending_message_script_));
diff --git a/src/isolate.h b/src/isolate.h
index 7944d0e..7e50929 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -801,6 +801,11 @@
   // Attempts to compute the current source location, storing the
   // result in the target out parameter.
   void ComputeLocation(MessageLocation* target);
+  void ComputeLocationFromStackTrace(MessageLocation* target,
+                                     Handle<Object> exception);
+
+  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
+                                        MessageLocation* location);
 
   // Out of resource exception helpers.
   Object* StackOverflow();
@@ -1201,9 +1206,6 @@
   // then return true.
   bool PropagatePendingExceptionToExternalTryCatch();
 
-  Handle<JSMessageObject> CreateMessage(Handle<Object> exception,
-                                        MessageLocation* location);
-
   // Traverse prototype chain to find out whether the object is derived from
   // the Error object.
   bool IsErrorObject(Handle<Object> obj);
diff --git a/src/messages.js b/src/messages.js
index 513ffdc..a9da851 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -367,6 +367,23 @@
   return MakeGenericError($Error, type, args);
 }
 
+
+// The embedded versions are called from unoptimized code, with embedded
+// arguments. Those arguments cannot be arrays, which are context-dependent.
+function MakeTypeErrorEmbedded(type, arg) {
+  return MakeGenericError($TypeError, type, [arg]);
+}
+
+
+function MakeSyntaxErrorEmbedded(type, arg) {
+  return MakeGenericError($SyntaxError, type, [arg]);
+}
+
+
+function MakeReferenceErrorEmbedded(type, arg) {
+  return MakeGenericError($ReferenceError, type, [arg]);
+}
+
 /**
  * Find a line number given a specific source position.
  * @param {number} position The source position.
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 75e2ed1..cce2b8b 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -595,7 +595,7 @@
   // Static so that subsequent calls free previously allocated space.
   // This also means that previous results will be overwritten.
   static char* buffer = NULL;
-  if (buffer != NULL) free(buffer);
+  if (buffer != NULL) delete[] buffer;
   buffer = new char[length()+1];
   WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
   buffer[length()] = 0;
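The change above fixes an allocator mismatch rather than style: the buffer is allocated with new[], and releasing new[] memory with free() (or scalar delete) is undefined behavior. Minimal illustration:

    int main() {
      char* buffer = new char[16];
      // free(buffer);   // undefined: malloc/free paired with new[]
      // delete buffer;  // undefined: scalar delete for an array new[]
      delete[] buffer;   // correct: matches new[]
      return 0;
    }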
diff --git a/src/parser.cc b/src/parser.cc
index 5c85b97..ab9d6b7 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -519,7 +519,7 @@
 Expression* ParserTraits::NewThrowReferenceError(const char* message, int pos) {
   return NewThrowError(
       parser_->ast_value_factory()->make_reference_error_string(), message,
-      NULL, pos);
+      parser_->ast_value_factory()->empty_string(), pos);
 }
 
 
@@ -541,17 +541,11 @@
     const AstRawString* constructor, const char* message,
     const AstRawString* arg, int pos) {
   Zone* zone = parser_->zone();
-  int argc = arg != NULL ? 1 : 0;
   const AstRawString* type =
       parser_->ast_value_factory()->GetOneByteString(message);
-  ZoneList<const AstRawString*>* array =
-      new (zone) ZoneList<const AstRawString*>(argc, zone);
-  if (arg != NULL) {
-    array->Add(arg, zone);
-  }
   ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone);
   args->Add(parser_->factory()->NewStringLiteral(type, pos), zone);
-  args->Add(parser_->factory()->NewStringListLiteral(array, pos), zone);
+  args->Add(parser_->factory()->NewStringLiteral(arg, pos), zone);
   CallRuntime* call_constructor =
       parser_->factory()->NewCallRuntime(constructor, NULL, args, pos);
   return parser_->factory()->NewThrow(call_constructor, pos);
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index 1555106..2de372f 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -2614,48 +2614,30 @@
 // traversals might be required, rendering this a rather slow operation.
 // However, for setting break points, which is normally done through some
 // kind of user interaction, the performance is not crucial.
-static Handle<Object> Runtime_GetScriptFromScriptName(
-    Handle<String> script_name) {
-  // Scan the heap for Script objects to find the script with the requested
-  // script data.
-  Handle<Script> script;
-  Factory* factory = script_name->GetIsolate()->factory();
-  Heap* heap = script_name->GetHeap();
-  HeapIterator iterator(heap);
-  HeapObject* obj = NULL;
-  while (script.is_null() && ((obj = iterator.next()) != NULL)) {
-    // If a script is found check if it has the script data requested.
-    if (obj->IsScript()) {
-      if (Script::cast(obj)->name()->IsString()) {
-        if (String::cast(Script::cast(obj)->name())->Equals(*script_name)) {
-          script = Handle<Script>(Script::cast(obj));
-        }
+RUNTIME_FUNCTION(Runtime_GetScript) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, script_name, 0);
+
+  Handle<Script> found;
+  Heap* heap = isolate->heap();
+  {
+    HeapIterator iterator(heap);
+    HeapObject* obj = NULL;
+    while ((obj = iterator.next()) != NULL) {
+      if (!obj->IsScript()) continue;
+      Script* script = Script::cast(obj);
+      if (!script->name()->IsString()) continue;
+      String* name = String::cast(script->name());
+      if (name->Equals(*script_name)) {
+        found = Handle<Script>(script, isolate);
+        break;
       }
     }
   }
 
-  // If no script with the requested script data is found return undefined.
-  if (script.is_null()) return factory->undefined_value();
-
-  // Return the script found.
-  return Script::GetWrapper(script);
-}
-
-
-// Get the script object from script data. NOTE: Regarding performance
-// see the NOTE for GetScriptFromScriptData.
-// args[0]: script data for the script to find the source for
-RUNTIME_FUNCTION(Runtime_GetScript) {
-  HandleScope scope(isolate);
-
-  DCHECK(args.length() == 1);
-
-  CONVERT_ARG_CHECKED(String, script_name, 0);
-
-  // Find the requested script.
-  Handle<Object> result =
-      Runtime_GetScriptFromScriptName(Handle<String>(script_name));
-  return *result;
+  if (found.is_null()) return heap->undefined_value();
+  return *Script::GetWrapper(found);
 }
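Behavior is preserved by the rewrite above: a linear heap scan that stops at the first script whose name matches, with "not found" mapped to undefined, while early continues flatten the nesting. The control-flow shape as a standalone sketch (names hypothetical):

    #include <cstring>
    #include <vector>

    struct Script { const char* name; };  // null name mirrors a non-string name

    static const Script* FindScript(const std::vector<Script>& heap,
                                    const char* wanted) {
      for (const Script& s : heap) {
        if (s.name == nullptr) continue;                  // IsString() guard
        if (std::strcmp(s.name, wanted) == 0) return &s;  // first match wins
      }
      return nullptr;  // caller maps this to undefined
    }

    int main() {
      std::vector<Script> heap = {{nullptr}, {"a.js"}, {"v8/gc"}};
      return FindScript(heap, "v8/gc") != nullptr ? 0 : 1;
    }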
 
 
diff --git a/src/version.cc b/src/version.cc
index 1645d55..86ffc02 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     30
-#define BUILD_NUMBER      22
+#define BUILD_NUMBER      23
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/test/cctest/compiler/codegen-tester.h b/test/cctest/compiler/codegen-tester.h
index 6af5c78..72d658f 100644
--- a/test/cctest/compiler/codegen-tester.h
+++ b/test/cctest/compiler/codegen-tester.h
@@ -7,6 +7,7 @@
 
 #include "src/v8.h"
 
+#include "src/compiler/instruction-selector.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/raw-machine-assembler.h"
 #include "src/simulator.h"
@@ -23,7 +24,9 @@
  public:
   MachineAssemblerTester(MachineType return_type, MachineType p0,
                          MachineType p1, MachineType p2, MachineType p3,
-                         MachineType p4)
+                         MachineType p4,
+                         MachineOperatorBuilder::Flags flags =
+                             MachineOperatorBuilder::Flag::kNoFlags)
       : HandleAndZoneScope(),
         CallHelper(
             main_isolate(),
@@ -31,7 +34,7 @@
         MachineAssembler(
             new (main_zone()) Graph(main_zone()),
             MakeMachineSignature(main_zone(), return_type, p0, p1, p2, p3, p4),
-            kMachPtr) {}
+            kMachPtr, flags) {}
 
   Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
     return this->Load(rep, this->PointerConstant(address),
@@ -89,8 +92,8 @@
                             MachineType p3 = kMachNone,
                             MachineType p4 = kMachNone)
       : MachineAssemblerTester<RawMachineAssembler>(
-            ReturnValueTraits<ReturnType>::Representation(), p0, p1, p2, p3,
-            p4) {}
+            ReturnValueTraits<ReturnType>::Representation(), p0, p1, p2, p3, p4,
+            InstructionSelector::SupportedMachineOperatorFlags()) {}
 
   template <typename Ci, typename Fn>
   void Run(const Ci& ci, const Fn& fn) {
diff --git a/test/cctest/compiler/test-codegen-deopt.cc b/test/cctest/compiler/test-codegen-deopt.cc
index 8ee4429..e299c3d 100644
--- a/test/cctest/compiler/test-codegen-deopt.cc
+++ b/test/cctest/compiler/test-codegen-deopt.cc
@@ -66,10 +66,10 @@
     // Initialize the codegen and generate code.
     Linkage* linkage = new (scope_->main_zone()) Linkage(info.zone(), &info);
     code = new v8::internal::compiler::InstructionSequence(scope_->main_zone(),
-                                                           graph, schedule);
+                                                           schedule);
     SourcePositionTable source_positions(graph);
-    InstructionSelector selector(scope_->main_zone(), linkage, code, schedule,
-                                 &source_positions);
+    InstructionSelector selector(scope_->main_zone(), graph, linkage, code,
+                                 schedule, &source_positions);
     selector.SelectInstructions();
 
     if (FLAG_trace_turbo) {
@@ -78,7 +78,8 @@
     }
 
     Frame frame;
-    RegisterAllocator allocator(scope_->main_zone(), &frame, code);
+    RegisterAllocator allocator(RegisterAllocator::PlatformConfig(),
+                                scope_->main_zone(), &frame, code);
     CHECK(allocator.Allocate());
 
     if (FLAG_trace_turbo) {
diff --git a/test/cctest/compiler/test-instruction.cc b/test/cctest/compiler/test-instruction.cc
index d61f34c..2b41e40 100644
--- a/test/cctest/compiler/test-instruction.cc
+++ b/test/cctest/compiler/test-instruction.cc
@@ -55,7 +55,7 @@
       Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
       DCHECK(schedule.rpo_order()->size() > 0);
     }
-    code = new TestInstrSeq(main_zone(), &graph, &schedule);
+    code = new TestInstrSeq(main_zone(), &schedule);
   }
 
   Node* Int32Constant(int32_t val) {
@@ -128,8 +128,6 @@
 
   R.allocCode();
 
-  CHECK_EQ(R.graph.NodeCount(), R.code->node_count());
-
   BasicBlockVector* blocks = R.schedule.rpo_order();
   CHECK_EQ(static_cast<int>(blocks->size()), R.code->InstructionBlockCount());
 
diff --git a/test/cctest/compiler/test-run-machops.cc b/test/cctest/compiler/test-run-machops.cc
index 6935771..271967d 100644
--- a/test/cctest/compiler/test-run-machops.cc
+++ b/test/cctest/compiler/test-run-machops.cc
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <cmath>
 #include <functional>
 #include <limits>
 
 #include "src/base/bits.h"
+#include "src/codegen.h"
 #include "src/compiler/generic-node-inl.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/compiler/codegen-tester.h"
@@ -4535,4 +4537,171 @@
   }
 }
 
+
+static double two_30 = 1 << 30;             // 2^30 is a smi boundary.
+static double two_52 = two_30 * (1 << 22);  // 2^52 is a precision boundary.
+static double kValues[] = {0.1,
+                           0.2,
+                           0.49999999999999994,
+                           0.5,
+                           0.7,
+                           1.0 - std::numeric_limits<double>::epsilon(),
+                           -0.1,
+                           -0.49999999999999994,
+                           -0.5,
+                           -0.7,
+                           1.1,
+                           1.0 + std::numeric_limits<double>::epsilon(),
+                           1.5,
+                           1.7,
+                           -1,
+                           -1 + std::numeric_limits<double>::epsilon(),
+                           -1 - std::numeric_limits<double>::epsilon(),
+                           -1.1,
+                           -1.5,
+                           -1.7,
+                           std::numeric_limits<double>::min(),
+                           -std::numeric_limits<double>::min(),
+                           std::numeric_limits<double>::max(),
+                           -std::numeric_limits<double>::max(),
+                           std::numeric_limits<double>::infinity(),
+                           -std::numeric_limits<double>::infinity(),
+                           two_30,
+                           two_30 + 0.1,
+                           two_30 + 0.5,
+                           two_30 + 0.7,
+                           two_30 - 1,
+                           two_30 - 1 + 0.1,
+                           two_30 - 1 + 0.5,
+                           two_30 - 1 + 0.7,
+                           -two_30,
+                           -two_30 + 0.1,
+                           -two_30 + 0.5,
+                           -two_30 + 0.7,
+                           -two_30 + 1,
+                           -two_30 + 1 + 0.1,
+                           -two_30 + 1 + 0.5,
+                           -two_30 + 1 + 0.7,
+                           two_52,
+                           two_52 + 0.1,
+                           two_52 + 0.5,
+                           two_52 + 0.5,
+                           two_52 + 0.7,
+                           two_52 + 0.7,
+                           two_52 - 1,
+                           two_52 - 1 + 0.1,
+                           two_52 - 1 + 0.5,
+                           two_52 - 1 + 0.7,
+                           -two_52,
+                           -two_52 + 0.1,
+                           -two_52 + 0.5,
+                           -two_52 + 0.7,
+                           -two_52 + 1,
+                           -two_52 + 1 + 0.1,
+                           -two_52 + 1 + 0.5,
+                           -two_52 + 1 + 0.7,
+                           two_30,
+                           two_30 - 0.1,
+                           two_30 - 0.5,
+                           two_30 - 0.7,
+                           two_30 - 1,
+                           two_30 - 1 - 0.1,
+                           two_30 - 1 - 0.5,
+                           two_30 - 1 - 0.7,
+                           -two_30,
+                           -two_30 - 0.1,
+                           -two_30 - 0.5,
+                           -two_30 - 0.7,
+                           -two_30 + 1,
+                           -two_30 + 1 - 0.1,
+                           -two_30 + 1 - 0.5,
+                           -two_30 + 1 - 0.7,
+                           two_52,
+                           two_52 - 0.1,
+                           two_52 - 0.5,
+                           two_52 - 0.5,
+                           two_52 - 0.7,
+                           two_52 - 0.7,
+                           two_52 - 1,
+                           two_52 - 1 - 0.1,
+                           two_52 - 1 - 0.5,
+                           two_52 - 1 - 0.7,
+                           -two_52,
+                           -two_52 - 0.1,
+                           -two_52 - 0.5,
+                           -two_52 - 0.7,
+                           -two_52 + 1,
+                           -two_52 + 1 - 0.1,
+                           -two_52 + 1 - 0.5,
+                           -two_52 + 1 - 0.7};
+
+
+TEST(RunFloat64Floor) {
+  double input = -1.0;
+  double result = 0.0;
+  RawMachineAssemblerTester<int32_t> m;
+  if (!m.machine()->HasFloat64Floor()) return;
+  m.StoreToPointer(&result, kMachFloat64,
+                   m.Float64Floor(m.LoadFromPointer(&input, kMachFloat64)));
+  m.Return(m.Int32Constant(0));
+  for (size_t i = 0; i < arraysize(kValues); ++i) {
+    input = kValues[i];
+    CHECK_EQ(0, m.Call());
+    double expected = std::floor(kValues[i]);
+    CHECK_EQ(expected, result);
+  }
+}
+
+
+TEST(RunFloat64Ceil) {
+  double input = -1.0;
+  double result = 0.0;
+  RawMachineAssemblerTester<int32_t> m;
+  if (!m.machine()->HasFloat64Ceil()) return;
+  m.StoreToPointer(&result, kMachFloat64,
+                   m.Float64Ceil(m.LoadFromPointer(&input, kMachFloat64)));
+  m.Return(m.Int32Constant(0));
+  for (size_t i = 0; i < arraysize(kValues); ++i) {
+    input = kValues[i];
+    CHECK_EQ(0, m.Call());
+    double expected = std::ceil(kValues[i]);
+    CHECK_EQ(expected, result);
+  }
+}
+
+
+TEST(RunFloat64RoundTruncate) {
+  double input = -1.0;
+  double result = 0.0;
+  RawMachineAssemblerTester<int32_t> m;
+  if (!m.machine()->HasFloat64RoundTruncate()) return;
+  m.StoreToPointer(
+      &result, kMachFloat64,
+      m.Float64RoundTruncate(m.LoadFromPointer(&input, kMachFloat64)));
+  m.Return(m.Int32Constant(0));
+  for (size_t i = 0; i < arraysize(kValues); ++i) {
+    input = kValues[i];
+    CHECK_EQ(0, m.Call());
+    double expected = std::trunc(kValues[i]);
+    CHECK_EQ(expected, result);
+  }
+}
+
+
+TEST(RunFloat64RoundTiesAway) {
+  double input = -1.0;
+  double result = 0.0;
+  RawMachineAssemblerTester<int32_t> m;
+  if (!m.machine()->HasFloat64RoundTiesAway()) return;
+  m.StoreToPointer(
+      &result, kMachFloat64,
+      m.Float64RoundTiesAway(m.LoadFromPointer(&input, kMachFloat64)));
+  m.Return(m.Int32Constant(0));
+  for (size_t i = 0; i < arraysize(kValues); ++i) {
+    input = kValues[i];
+    CHECK_EQ(0, m.Call());
+    double expected = std::round(kValues[i]);
+    CHECK_EQ(expected, result);
+  }
+}
 #endif  // V8_TURBOFAN_TARGET
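On the kValues comments above: 2^52 is a precision boundary because for doubles in [2^52, 2^53) the unit in the last place is exactly 1.0, so every representable value there is an integer; floor, ceil, trunc, and round act as the identity, and the small fractional offsets in the table vanish at construction time. A quick check:

    #include <cassert>

    int main() {
      const double two_52 = 4503599627370496.0;  // 2^52
      assert(two_52 + 0.1 == two_52);          // 0.1 is under half an ULP (0.5)
      assert(two_52 - 1 + 0.1 == two_52 - 1);  // ULP is 0.5 just below 2^52
      return 0;
    }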
diff --git a/test/cctest/compiler/test-simplified-lowering.cc b/test/cctest/compiler/test-simplified-lowering.cc
index e77f57a..dcbb9c8 100644
--- a/test/cctest/compiler/test-simplified-lowering.cc
+++ b/test/cctest/compiler/test-simplified-lowering.cc
@@ -1010,8 +1010,10 @@
     TestingGraph t(test_types[i], test_types[i]);
 
     t.CheckLoweringBinop(IrOpcode::kFloat64Div, t.simplified()->NumberDivide());
-    t.CheckLoweringBinop(IrOpcode::kFloat64Mod,
-                         t.simplified()->NumberModulus());
+    if (!test_types[i]->Is(Type::Unsigned32())) {
+      t.CheckLoweringBinop(IrOpcode::kFloat64Mod,
+                           t.simplified()->NumberModulus());
+    }
   }
 }
 
@@ -1324,45 +1326,54 @@
 }
 
 
+namespace {
+
 void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
-  Int32Matcher index = Int32Matcher(load_or_store->InputAt(1));
-  CHECK(index.Is(access.offset - access.tag()));
+  IntPtrMatcher mindex(load_or_store->InputAt(1));
+  CHECK(mindex.Is(access.offset - access.tag()));
 }
 
 
 Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
-  Int32BinopMatcher index(load_or_store->InputAt(1));
-  CHECK_EQ(IrOpcode::kInt32Add, index.node()->opcode());
-  CHECK(index.right().Is(access.header_size - access.tag()));
+  Node* index = load_or_store->InputAt(1);
+  if (kPointerSize == 8) {
+    CHECK_EQ(IrOpcode::kChangeUint32ToUint64, index->opcode());
+    index = index->InputAt(0);
+  }
 
-  int element_size = ElementSizeOf(access.machine_type);
+  Int32BinopMatcher mindex(index);
+  CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
+  CHECK(mindex.right().Is(access.header_size - access.tag()));
 
-  if (element_size != 1) {
-    Int32BinopMatcher mul(index.left().node());
-    CHECK_EQ(IrOpcode::kInt32Mul, mul.node()->opcode());
-    CHECK(mul.right().Is(element_size));
-    return mul.left().node();
+  const int element_size_shift = ElementSizeLog2Of(access.machine_type);
+  if (element_size_shift) {
+    Int32BinopMatcher shl(mindex.left().node());
+    CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
+    CHECK(shl.right().Is(element_size_shift));
+    return shl.left().node();
   } else {
-    return index.left().node();
+    return mindex.left().node();
   }
 }
 
 
-static const MachineType machine_reps[] = {
-    kRepBit,    kMachInt8,    kMachInt16,    kMachInt32,
-    kMachInt64, kMachFloat64, kMachAnyTagged};
+const MachineType kMachineReps[] = {kRepBit,       kMachInt8,  kMachInt16,
+                                    kMachInt32,    kMachInt64, kMachFloat64,
+                                    kMachAnyTagged};
+
+}  // namespace
 
 
 TEST(LowerLoadField_to_load) {
   TestingGraph t(Type::Any(), Type::Signed32());
 
-  for (size_t i = 0; i < arraysize(machine_reps); i++) {
+  for (size_t i = 0; i < arraysize(kMachineReps); i++) {
     FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
-                          Handle<Name>::null(), Type::Any(), machine_reps[i]};
+                          Handle<Name>::null(), Type::Any(), kMachineReps[i]};
 
     Node* load =
         t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
-    Node* use = t.Use(load, machine_reps[i]);
+    Node* use = t.Use(load, kMachineReps[i]);
     t.Return(use);
     t.Lower();
     CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1370,7 +1381,7 @@
     CheckFieldAccessArithmetic(access, load);
 
     MachineType rep = OpParameter<MachineType>(load);
-    CHECK_EQ(machine_reps[i], rep);
+    CHECK_EQ(kMachineReps[i], rep);
   }
 }
 
@@ -1378,12 +1389,12 @@
 TEST(LowerStoreField_to_store) {
   TestingGraph t(Type::Any(), Type::Signed32());
 
-  for (size_t i = 0; i < arraysize(machine_reps); i++) {
+  for (size_t i = 0; i < arraysize(kMachineReps); i++) {
     FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
-                          Handle<Name>::null(), Type::Any(), machine_reps[i]};
+                          Handle<Name>::null(), Type::Any(), kMachineReps[i]};
 
 
-    Node* val = t.ExampleWithOutput(machine_reps[i]);
+    Node* val = t.ExampleWithOutput(kMachineReps[i]);
     Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
                                      val, t.start, t.start);
     t.Effect(store);
@@ -1393,10 +1404,10 @@
     CheckFieldAccessArithmetic(access, store);
 
     StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
-    if (machine_reps[i] & kRepTagged) {
+    if (kMachineReps[i] & kRepTagged) {
       CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
     }
-    CHECK_EQ(machine_reps[i], rep.machine_type());
+    CHECK_EQ(kMachineReps[i], rep.machine_type());
   }
 }
 
@@ -1404,15 +1415,15 @@
 TEST(LowerLoadElement_to_load) {
   TestingGraph t(Type::Any(), Type::Signed32());
 
-  for (size_t i = 0; i < arraysize(machine_reps); i++) {
+  for (size_t i = 0; i < arraysize(kMachineReps); i++) {
     ElementAccess access = {kNoBoundsCheck, kTaggedBase,
                             FixedArrayBase::kHeaderSize, Type::Any(),
-                            machine_reps[i]};
+                            kMachineReps[i]};
 
     Node* load =
         t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1,
                            t.jsgraph.Int32Constant(1024), t.start, t.start);
-    Node* use = t.Use(load, machine_reps[i]);
+    Node* use = t.Use(load, kMachineReps[i]);
     t.Return(use);
     t.Lower();
     CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1420,7 +1431,7 @@
     CheckElementAccessArithmetic(access, load);
 
     MachineType rep = OpParameter<MachineType>(load);
-    CHECK_EQ(machine_reps[i], rep);
+    CHECK_EQ(kMachineReps[i], rep);
   }
 }
 
@@ -1428,12 +1439,12 @@
 TEST(LowerStoreElement_to_store) {
   TestingGraph t(Type::Any(), Type::Signed32());
 
-  for (size_t i = 0; i < arraysize(machine_reps); i++) {
+  for (size_t i = 0; i < arraysize(kMachineReps); i++) {
     ElementAccess access = {kNoBoundsCheck, kTaggedBase,
                             FixedArrayBase::kHeaderSize, Type::Any(),
-                            machine_reps[i]};
+                            kMachineReps[i]};
 
-    Node* val = t.ExampleWithOutput(machine_reps[i]);
+    Node* val = t.ExampleWithOutput(kMachineReps[i]);
     Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
                                      t.p1, t.jsgraph.Int32Constant(1024), val,
                                      t.start, t.start);
@@ -1444,10 +1455,10 @@
     CheckElementAccessArithmetic(access, store);
 
     StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
-    if (machine_reps[i] & kRepTagged) {
+    if (kMachineReps[i] & kRepTagged) {
       CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
     }
-    CHECK_EQ(machine_reps[i], rep.machine_type());
+    CHECK_EQ(kMachineReps[i], rep.machine_type());
   }
 }
 
@@ -1927,3 +1938,22 @@
     CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode());  // Pesky -0 behavior.
   }
 }
+
+
+TEST(NumberModulus_Uint32) {
+  const double kConstants[] = {2, 100, 1000, 1024, 2048};
+  const MachineType kTypes[] = {kMachInt32, kMachUint32};
+
+  for (auto const type : kTypes) {
+    for (auto const c : kConstants) {
+      TestingGraph t(Type::Unsigned32());
+      Node* k = t.jsgraph.Constant(c);
+      Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
+      Node* use = t.Use(mod, type);
+      t.Return(use);
+      t.Lower();
+
+      CHECK_EQ(IrOpcode::kUint32Mod, use->InputAt(0)->opcode());
+    }
+  }
+}
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 11393f0..da59488 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -8547,7 +8547,7 @@
 }
 
 
-THREADED_TEST(ExceptionGetStackTrace) {
+THREADED_TEST(ExceptionGetMessage) {
   LocalContext context;
   v8::HandleScope scope(context->GetIsolate());
 
@@ -8559,16 +8559,25 @@
   global->Set(v8_str("throwV8Exception"), fun->GetFunction());
 
   TryCatch try_catch;
-  CompileRun("function f1() { throwV8Exception(); }; f1();");
+  CompileRun(
+      "function f1() {\n"
+      "  throwV8Exception();\n"
+      "};\n"
+      "f1();");
   CHECK(try_catch.HasCaught());
 
   v8::Handle<v8::Value> error = try_catch.Exception();
-  v8::Handle<String> foo = v8_str("foo");
-  v8::Handle<String> message = v8_str("message");
+  v8::Handle<String> foo_str = v8_str("foo");
+  v8::Handle<String> message_str = v8_str("message");
   CHECK(error->IsObject());
-  CHECK(error.As<v8::Object>()->Get(message)->Equals(foo));
+  CHECK(error.As<v8::Object>()->Get(message_str)->Equals(foo_str));
 
-  v8::Handle<v8::StackTrace> stackTrace = v8::Exception::GetStackTrace(error);
+  v8::Handle<v8::Message> message = v8::Exception::GetMessage(error);
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(2, message->GetLineNumber());
+  CHECK_EQ(2, message->GetStartColumn());
+
+  v8::Handle<v8::StackTrace> stackTrace = message->GetStackTrace();
   CHECK(!stackTrace.IsEmpty());
   CHECK_EQ(2, stackTrace->GetFrameCount());
 
@@ -17873,7 +17882,8 @@
     promise_reject_counter++;
     CcTest::global()->Set(v8_str("rejected"), message.GetPromise());
     CcTest::global()->Set(v8_str("value"), message.GetValue());
-    v8::Handle<v8::StackTrace> stack_trace = message.GetStackTrace();
+    v8::Handle<v8::StackTrace> stack_trace =
+        v8::Exception::GetMessage(message.GetValue())->GetStackTrace();
     if (!stack_trace.IsEmpty()) {
       promise_reject_frame_count = stack_trace->GetFrameCount();
       if (promise_reject_frame_count > 0) {
@@ -17887,7 +17897,6 @@
     promise_revoke_counter++;
     CcTest::global()->Set(v8_str("revoked"), message.GetPromise());
     CHECK(message.GetValue().IsEmpty());
-    CHECK(message.GetStackTrace().IsEmpty());
   }
 }
 
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index 9079d5a..b6e260e 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -1676,4 +1676,100 @@
   CHECK_EQ(42, res);
 }
 
+
+TEST(ARMv8_vrintX) {
+  // Test the vrintX floating point instructions.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    double input;
+    double ar;
+    double nr;
+    double mr;
+    double pr;
+    double zr;
+  } T;
+  T t;
+
+  // Create a function that accepts &t, and loads, manipulates, and stores
+  // the doubles.
+  Assembler assm(isolate, NULL, 0);
+  Label L, C;
+
+
+  if (CpuFeatures::IsSupported(ARMv8)) {
+    CpuFeatureScope scope(&assm, ARMv8);
+
+    __ mov(ip, Operand(sp));
+    __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+
+    __ mov(r4, Operand(r0));
+
+    // Test vrinta
+    __ vldr(d6, r4, OFFSET_OF(T, input));
+    __ vrinta(d5, d6);
+    __ vstr(d5, r4, OFFSET_OF(T, ar));
+
+    // Test vrintn
+    __ vldr(d6, r4, OFFSET_OF(T, input));
+    __ vrintn(d5, d6);
+    __ vstr(d5, r4, OFFSET_OF(T, nr));
+
+    // Test vrintp
+    __ vldr(d6, r4, OFFSET_OF(T, input));
+    __ vrintp(d5, d6);
+    __ vstr(d5, r4, OFFSET_OF(T, pr));
+
+    // Test vrintm
+    __ vldr(d6, r4, OFFSET_OF(T, input));
+    __ vrintm(d5, d6);
+    __ vstr(d5, r4, OFFSET_OF(T, mr));
+
+    // Test vrintz
+    __ vldr(d6, r4, OFFSET_OF(T, input));
+    __ vrintz(d5, d6);
+    __ vstr(d5, r4, OFFSET_OF(T, zr));
+
+    __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Handle<Code> code = isolate->factory()->NewCode(
+        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef DEBUG
+    OFStream os(stdout);
+    code->Print(os);
+#endif
+    F3 f = FUNCTION_CAST<F3>(code->entry());
+
+    Object* dummy = nullptr;
+    USE(dummy);
+
+#define CHECK_VRINT(input_val, ares, nres, mres, pres, zres) \
+  t.input = input_val;                                       \
+  dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);            \
+  CHECK_EQ(ares, t.ar);                                      \
+  CHECK_EQ(nres, t.nr);                                      \
+  CHECK_EQ(mres, t.mr);                                      \
+  CHECK_EQ(pres, t.pr);                                      \
+  CHECK_EQ(zres, t.zr);
+
+    CHECK_VRINT(-0.5, -1.0, -0.0, -1.0, -0.0, -0.0)
+    CHECK_VRINT(-0.6, -1.0, -1.0, -1.0, -0.0, -0.0)
+    CHECK_VRINT(-1.1, -1.0, -1.0, -2.0, -1.0, -1.0)
+    CHECK_VRINT(0.5, 1.0, 0.0, 0.0, 1.0, 0.0)
+    CHECK_VRINT(0.6, 1.0, 1.0, 0.0, 1.0, 0.0)
+    CHECK_VRINT(1.1, 1.0, 1.0, 1.0, 2.0, 1.0)
+    double inf = std::numeric_limits<double>::infinity();
+    CHECK_VRINT(inf, inf, inf, inf, inf, inf)
+    CHECK_VRINT(-inf, -inf, -inf, -inf, -inf, -inf)
+    CHECK_VRINT(-0.0, -0.0, -0.0, -0.0, -0.0, -0.0)
+    double nan = std::numeric_limits<double>::quiet_NaN();
+    CHECK_VRINT(nan, nan, nan, nan, nan, nan)
+
+#undef CHECK_VRINT
+  }
+}
 #undef __
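The five expected values per CHECK_VRINT line follow the ARMv8 rounding modes: vrinta rounds to nearest with ties away from zero, vrintn to nearest with ties to even, vrintm toward minus infinity, vrintp toward plus infinity, and vrintz toward zero. Cross-checked against the C++ library equivalents (nearbyint assumes the default FE_TONEAREST environment):

    #include <cassert>
    #include <cmath>

    int main() {
      const double x = -0.5;
      assert(std::round(x) == -1.0);     // vrinta: ties away from zero
      assert(std::nearbyint(x) == 0.0);  // vrintn: ties to even (yields -0.0)
      assert(std::floor(x) == -1.0);     // vrintm: toward -infinity
      assert(std::ceil(x) == 0.0);       // vrintp: toward +infinity (-0.0)
      assert(std::trunc(x) == 0.0);      // vrintz: toward zero (-0.0)
      return 0;
    }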
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 1fabdc2..39356b1 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -686,6 +686,30 @@
 }
 
 
+TEST(ARMv8_vrintX_disasm) {
+  SET_UP();
+
+  if (CpuFeatures::IsSupported(ARMv8)) {
+    COMPARE(vrinta(d0, d0), "feb80b40       vrinta.f64.f64 d0, d0");
+    COMPARE(vrinta(d2, d3), "feb82b43       vrinta.f64.f64 d2, d3");
+
+    COMPARE(vrintp(d0, d0), "feba0b40       vrintp.f64.f64 d0, d0");
+    COMPARE(vrintp(d2, d3), "feba2b43       vrintp.f64.f64 d2, d3");
+
+    COMPARE(vrintn(d0, d0), "feb90b40       vrintn.f64.f64 d0, d0");
+    COMPARE(vrintn(d2, d3), "feb92b43       vrintn.f64.f64 d2, d3");
+
+    COMPARE(vrintm(d0, d0), "febb0b40       vrintm.f64.f64 d0, d0");
+    COMPARE(vrintm(d2, d3), "febb2b43       vrintm.f64.f64 d2, d3");
+
+    COMPARE(vrintz(d0, d0), "eeb60bc0       vrintz.f64.f64 d0, d0");
+    COMPARE(vrintz(d2, d3, ne), "1eb62bc3       vrintzne.f64.f64 d2, d3");
+  }
+
+  VERIFY_RUN();
+}
+
+
 TEST(Neon) {
   SET_UP();
 
diff --git a/test/mjsunit/asm/math-ceil.js b/test/mjsunit/asm/math-ceil.js
new file mode 100644
index 0000000..edb9493
--- /dev/null
+++ b/test/mjsunit/asm/math-ceil.js
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+  "use asm";
+
+  var ceil = stdlib.Math.ceil;
+
+  // f: double -> double
+  function f(a) {
+    a = +a;
+    return ceil(a);
+  }
+
+  return { f: f };
+}
+
+var f = Module({ Math: Math }).f;
+
+assertTrue(isNaN(f(NaN)));
+assertTrue(isNaN(f(undefined)));
+assertTrue(isNaN(f(function() {})));
+
+assertEquals(0,                   f(0));
+assertEquals(+0,                  f(+0));
+assertEquals(-0,                  f(-0));
+assertEquals(1,                   f(0.49999));
+assertEquals(1,                   f(0.6));
+assertEquals(1,                   f(0.5));
+assertEquals(-0,                  f(-0.1));
+assertEquals(-0,                  f(-0.5));
+assertEquals(-0,                  f(-0.6));
+assertEquals(-1,                  f(-1.6));
+assertEquals(-0,                  f(-0.50001));
+
+assertEquals("Infinity", String(f(Infinity)));
+assertEquals("-Infinity", String(f(-Infinity)));
diff --git a/test/mjsunit/asm/math-floor.js b/test/mjsunit/asm/math-floor.js
new file mode 100644
index 0000000..e8c3f34
--- /dev/null
+++ b/test/mjsunit/asm/math-floor.js
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+  "use asm";
+
+  var floor = stdlib.Math.floor;
+
+  // f: double -> double
+  function f(a) {
+    a = +a;
+    return floor(a);
+  }
+
+  return { f: f };
+}
+
+var f = Module({ Math: Math }).f;
+
+assertTrue(isNaN(f(NaN)));
+assertTrue(isNaN(f(undefined)));
+assertTrue(isNaN(f(function() {})));
+
+assertEquals(0,                   f(0));
+assertEquals(+0,                  f(+0));
+assertEquals(-0,                  f(-0));
+assertEquals(0,                   f(0.49999));
+assertEquals(+0,                  f(0.6));
+assertEquals(+0,                  f(0.5));
+assertEquals(-1,                  f(-0.1));
+assertEquals(-1,                  f(-0.5));
+assertEquals(-1,                  f(-0.6));
+assertEquals(-2,                  f(-1.6));
+assertEquals(-1,                  f(-0.50001));
+
+assertEquals("Infinity", String(f(Infinity)));
+assertEquals("-Infinity", String(f(-Infinity)));
diff --git a/test/mjsunit/asm/uint32-less-than-shift.js b/test/mjsunit/asm/uint32-less-than-shift.js
new file mode 100644
index 0000000..7384e21
--- /dev/null
+++ b/test/mjsunit/asm/uint32-less-than-shift.js
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib, foreign, heap) {
+  'use asm';
+
+  function foo1(i1) {
+    i1 = i1 | 0;
+    var i10 = i1 >> 5;
+    if (i10 >>> 0 < 5) {
+      return 1;
+    } else {
+      return 0;
+    }
+    return 0;
+  }
+
+  function foo2(i1) {
+    i1 = i1 | 0;
+    var i10 = i1 / 32 | 0;
+    if (i10 >>> 0 < 5) {
+      return 1;
+    } else {
+      return 0;
+    }
+    return 0;
+  }
+
+  function foo3(i1) {
+    i1 = i1 | 0;
+    var i10 = (i1 + 32 | 0) / 32 | 0;
+    if (i10 >>> 0 < 5) {
+      return 1;
+    } else {
+      return 0;
+    }
+    return 0;
+  }
+  return {foo1: foo1, foo2: foo2, foo3: foo3};
+}
+
+var m = Module(this, {}, undefined);
+
+for (var i = 0; i < 4 * 32; i++) {
+  assertEquals(1, m.foo1(i));
+  assertEquals(1, m.foo2(i));
+  assertEquals(1, m.foo3(i));
+}
+
+for (var i = 4 * 32; i < 5 * 32; i++) {
+  assertEquals(1, m.foo1(i));
+  assertEquals(1, m.foo2(i));
+  assertEquals(0, m.foo3(i));
+}
+
+for (var i = 5 * 32; i < 10 * 32; i++) {
+  assertEquals(0, m.foo1(i));
+  assertEquals(0, m.foo2(i));
+  assertEquals(0, m.foo3(i));
+}
diff --git a/test/mjsunit/debug-step-turbofan.js b/test/mjsunit/debug-step-turbofan.js
new file mode 100644
index 0000000..c8c346b
--- /dev/null
+++ b/test/mjsunit/debug-step-turbofan.js
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-filter=g --allow-natives-syntax
+
+// Test that Debug::PrepareForBreakPoints can deal with turbofan code (g)
+// on the stack.  Without deoptimization support, we will not be able to
+// replace optimized code for g by unoptimized code with debug break slots.
+// This would cause stepping to fail (V8 issue 3660).
+
+function f(x) {
+  g(x);
+  var a = 0;              // Break 6
+  return a;               // Break 7
+}                         // Break 8
+
+function g(x) {
+  if (x) h();
+  var a = 0;              // Break 2
+  var b = 1;              // Break 3
+  return a + b;           // Break 4
+}                         // Break 5
+
+function h() {
+  debugger;               // Break 0
+}                         // Break 1
+
+Debug = debug.Debug;
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+  if (event != Debug.DebugEvent.Break) return;
+  try {
+    exec_state.prepareStep(Debug.StepAction.StepNext, 1);
+    print(exec_state.frame(0).sourceLineText());
+    var match = exec_state.frame(0).sourceLineText().match(/Break (\d)/);
+    assertNotNull(match);
+    assertEquals(break_count++, parseInt(match[1]));
+  } catch (e) {
+    print(e + e.stack);
+    exception = e;
+  }
+}
+
+f(0);
+f(0);
+%OptimizeFunctionOnNextCall(g);
+
+Debug.setListener(listener);
+
+f(1);
+
+Debug.setListener(null);  // Break 9
+assertNull(exception);
+assertEquals(10, break_count);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index aa263aa..e79627d 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -81,6 +81,10 @@
   'compiler/osr-assert': [PASS, NO_VARIANTS],
   'regress/regress-2185-2': [PASS, NO_VARIANTS],
 
+  # Issue 3660: Replacing activated TurboFan frames by unoptimized code does
+  # not work, but we expect it to not crash.
+  'debug-step-turbofan': [PASS, FAIL],
+
   # Support for %GetFrameDetails is missing and requires checkpoints.
   'debug-evaluate-bool-constructor': [PASS, NO_VARIANTS],
   'debug-evaluate-const': [PASS, NO_VARIANTS],
diff --git a/test/mjsunit/regress/regress-crbug-410033.js b/test/mjsunit/regress/regress-crbug-410033.js
new file mode 100644
index 0000000..63693e6
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-410033.js
@@ -0,0 +1,7 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+%GetScript('v8/gc');
diff --git a/test/mjsunit/serialize-embedded-error.js b/test/mjsunit/serialize-embedded-error.js
new file mode 100644
index 0000000..473c931
--- /dev/null
+++ b/test/mjsunit/serialize-embedded-error.js
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --serialize-toplevel --cache=code
+
+var caught = false;
+try {
+  parseInt() = 0;
+} catch(e) {
+  caught = true;
+}
+assertTrue(caught);
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 00ba050..077662e 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -221,7 +221,8 @@
 
   # Test that depends on timer resolution. Fails every now and then
   # if we're unlucky enough to get a context switch at a bad time.
-  'js1_5/extensions/regress-363258': [PASS, FAIL],
+  # TODO(mstarzinger): Switch off TF on windows due to timeouts.
+  'js1_5/extensions/regress-363258': [PASS, FAIL, ['system == windows', NO_VARIANTS]],
 
 
   # Test that assumes specific runtime for a regexp, flaky in debug mode.
diff --git a/test/unittests/compiler/change-lowering-unittest.cc b/test/unittests/compiler/change-lowering-unittest.cc
index ed57513..bfee756 100644
--- a/test/unittests/compiler/change-lowering-unittest.cc
+++ b/test/unittests/compiler/change-lowering-unittest.cc
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/code-stubs.h"
 #include "src/compiler/change-lowering.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties-inl.h"
@@ -89,9 +90,12 @@
   Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
                                   const Matcher<Node*>& control_matcher) {
     return IsLoad(kMachFloat64, value_matcher,
-                  IsInt32Constant(HeapNumberValueOffset()), graph()->start(),
+                  IsIntPtrConstant(HeapNumberValueOffset()), graph()->start(),
                   control_matcher);
   }
+  Matcher<Node*> IsIntPtrConstant(int value) {
+    return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
+  }
   Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
                              const Matcher<Node*>& rhs_matcher) {
     return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
@@ -161,7 +165,7 @@
                 IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
           IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
                   CaptureEq(&heap_number),
-                  IsInt32Constant(HeapNumberValueOffset()), val,
+                  IsIntPtrConstant(HeapNumberValueOffset()), val,
                   CaptureEq(&heap_number), graph()->start())));
 }
 
@@ -206,7 +210,7 @@
                            IsAllocateHeapNumber(_, CaptureEq(&if_true))),
                      IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
                              CaptureEq(&heap_number),
-                             IsInt32Constant(HeapNumberValueOffset()),
+                             IsIntPtrConstant(HeapNumberValueOffset()),
                              IsChangeInt32ToFloat64(val),
                              CaptureEq(&heap_number), CaptureEq(&if_true))),
             IsProjection(
@@ -345,7 +349,7 @@
 
   EXPECT_THAT(reduction.replacement(),
               IsWord64Shl(IsChangeInt32ToInt64(val),
-                          IsInt32Constant(SmiShiftAmount())));
+                          IsInt64Constant(SmiShiftAmount())));
 }
 
 
@@ -365,12 +369,12 @@
       IsPhi(
           kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)),
           IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
-              IsWord64Sar(val, IsInt32Constant(SmiShiftAmount())))),
+              IsWord64Sar(val, IsInt64Constant(SmiShiftAmount())))),
           IsMerge(
               AllOf(CaptureEq(&if_true),
                     IsIfTrue(AllOf(
                         CaptureEq(&branch),
-                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+                        IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
                                  graph()->start())))),
               IsIfFalse(CaptureEq(&branch)))));
 }
@@ -392,11 +396,11 @@
       IsPhi(kMachInt32,
             IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
             IsTruncateInt64ToInt32(
-                IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
+                IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
             IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
                     IsIfFalse(AllOf(
                         CaptureEq(&branch),
-                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+                        IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
                                  graph()->start()))))));
 }
 
@@ -417,11 +421,11 @@
       IsPhi(kMachUint32,
             IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
             IsTruncateInt64ToInt32(
-                IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
+                IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
             IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
                     IsIfFalse(AllOf(
                         CaptureEq(&branch),
-                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+                        IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
                                  graph()->start()))))));
 }
 
@@ -441,18 +445,18 @@
       phi,
       IsPhi(
           kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
-                                      IsInt32Constant(SmiShiftAmount())),
+                                      IsInt64Constant(SmiShiftAmount())),
           IsFinish(AllOf(CaptureEq(&heap_number),
                          IsAllocateHeapNumber(_, CaptureEq(&if_false))),
                    IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
                            CaptureEq(&heap_number),
-                           IsInt32Constant(HeapNumberValueOffset()),
+                           IsInt64Constant(HeapNumberValueOffset()),
                            IsChangeUint32ToFloat64(val),
                            CaptureEq(&heap_number), CaptureEq(&if_false))),
           IsMerge(
               IsIfTrue(AllOf(CaptureEq(&branch),
                              IsBranch(IsUint32LessThanOrEqual(
-                                          val, IsInt32Constant(SmiMaxValue())),
+                                          val, IsInt64Constant(SmiMaxValue())),
                                       graph()->start()))),
               AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
 }
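
The substitutions above all follow one rule: constants that feed address computations (field offsets, Smi shift amounts, tag masks) are pointer-width values, so on a 64-bit target they surface as Int64Constant nodes rather than Int32Constant nodes. The fixture's new helper (quoted from the hunk above; Is32() is the fixture's existing word-size predicate) dispatches accordingly:

    Matcher<Node*> IsIntPtrConstant(int value) {
      return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
    }

The ChangeUint32ToTagged expectations use IsInt64Constant directly, presumably because that path is only exercised on 64-bit targets.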
diff --git a/test/unittests/compiler/instruction-selector-unittest.cc b/test/unittests/compiler/instruction-selector-unittest.cc
index 0c5cdc5..ba420f7 100644
--- a/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/test/unittests/compiler/instruction-selector-unittest.cc
@@ -37,10 +37,10 @@
   EXPECT_NE(0, graph()->NodeCount());
   int initial_node_count = graph()->NodeCount();
   Linkage linkage(test_->zone(), call_descriptor());
-  InstructionSequence sequence(test_->zone(), graph(), schedule);
+  InstructionSequence sequence(test_->zone(), schedule);
   SourcePositionTable source_position_table(graph());
-  InstructionSelector selector(test_->zone(), &linkage, &sequence, schedule,
-                               &source_position_table, features);
+  InstructionSelector selector(test_->zone(), graph(), &linkage, &sequence,
+                               schedule, &source_position_table, features);
   selector.SelectInstructions();
   if (FLAG_trace_turbo) {
     OFStream out(stdout);
@@ -50,9 +50,9 @@
   Stream s;
   // Map virtual registers.
   {
-    const NodeToVregMap& node_map = sequence.GetNodeMapForTesting();
+    const NodeToVregMap& node_map = selector.GetNodeMapForTesting();
     for (int i = 0; i < initial_node_count; ++i) {
-      if (node_map[i] != InstructionSequence::kNodeUnmapped) {
+      if (node_map[i] != InstructionSelector::kNodeUnmapped) {
         s.virtual_registers_.insert(std::make_pair(i, node_map[i]));
       }
     }
@@ -138,6 +138,14 @@
 }
 
 
+bool InstructionSelectorTest::Stream::IsSameAsFirst(
+    const InstructionOperand* operand) const {
+  if (!operand->IsUnallocated()) return false;
+  const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
+  return unallocated->HasSameAsInputPolicy();
+}
+
+
 bool InstructionSelectorTest::Stream::IsUsedAtStart(
     const InstructionOperand* operand) const {
   if (!operand->IsUnallocated()) return false;
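
IsSameAsFirst complements the existing IsFixed/IsUsedAtStart helpers: it reports whether an operand carries the allocator's same-as-input policy, which is how two-address x64 instructions express that the output must reuse the first input's register. The typical assertion pattern (as used by the TruncateInt64ToInt32 tests later in this diff, where s is a built Stream):

    ASSERT_EQ(1U, s[0]->OutputCount());
    EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));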
diff --git a/test/unittests/compiler/instruction-selector-unittest.h b/test/unittests/compiler/instruction-selector-unittest.h
index 4e49862..073af15 100644
--- a/test/unittests/compiler/instruction-selector-unittest.h
+++ b/test/unittests/compiler/instruction-selector-unittest.h
@@ -11,6 +11,7 @@
 #include "src/base/utils/random-number-generator.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/raw-machine-assembler.h"
+#include "src/macro-assembler.h"
 #include "test/unittests/test-utils.h"
 
 namespace v8 {
@@ -171,6 +172,7 @@
     int ToVreg(const Node* node) const;
 
     bool IsFixed(const InstructionOperand* operand, Register reg) const;
+    bool IsSameAsFirst(const InstructionOperand* operand) const;
     bool IsUsedAtStart(const InstructionOperand* operand) const;
 
     FrameStateDescriptor* GetFrameStateDescriptor(int deoptimization_id) {
diff --git a/test/unittests/compiler/js-builtin-reducer-unittest.cc b/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 26cabc3..6cfcc26 100644
--- a/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -21,8 +21,9 @@
   JSBuiltinReducerTest() : javascript_(zone()) {}
 
  protected:
-  Reduction Reduce(Node* node) {
-    MachineOperatorBuilder machine;
+  Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+                                   MachineOperatorBuilder::Flag::kNoFlags) {
+    MachineOperatorBuilder machine(kMachPtr, flags);
     JSGraph jsgraph(graph(), common(), javascript(), &machine);
     JSBuiltinReducer reducer(&jsgraph);
     return reducer.Reduce(node);
@@ -237,6 +238,79 @@
   }
 }
 
+
+// -----------------------------------------------------------------------------
+// Math.floor
+
+
+TEST_F(JSBuiltinReducerTest, MathFloorAvailable) {
+  Handle<JSFunction> f = MathFunction("floor");
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call =
+        graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+                         fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Floor);
+
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsFloat64Floor(p0));
+  }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathFloorUnavailable) {
+  Handle<JSFunction> f = MathFunction("floor");
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call =
+        graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+                         fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
+
+    ASSERT_FALSE(r.Changed());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.ceil
+
+
+TEST_F(JSBuiltinReducerTest, MathCeilAvailable) {
+  Handle<JSFunction> f = MathFunction("ceil");
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call =
+        graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+                         fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Ceil);
+
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsFloat64Ceil(p0));
+  }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathCeilUnavailable) {
+  Handle<JSFunction> f = MathFunction("ceil");
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call =
+        graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
+                         fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
+
+    ASSERT_FALSE(r.Changed());
+  }
+}
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
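
The new Flags parameter lets each test declare which machine instructions the target claims to support, and the Available/Unavailable pairs pin down both directions of the contract: Math.floor reduces to a Float64Floor node only when kFloat64Floor is advertised, and is left alone otherwise (likewise for Math.ceil). A sketch of the gating the reducer is expected to perform; the accessor names are assumptions inferred from the Flag names, not quoted source:

    // Hypothetical shape of the Math.floor reduction:
    if (machine()->HasFloat64Floor()) {  // target advertises the instruction
      return Replace(graph()->NewNode(machine()->Float64Floor(), input));
    }
    return NoChange();  // otherwise leave the JS call untouched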
diff --git a/test/unittests/compiler/machine-operator-reducer-unittest.cc b/test/unittests/compiler/machine-operator-reducer-unittest.cc
index fff6f96..be22bfd 100644
--- a/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -1089,10 +1089,9 @@
 
     Reduction r = Reduce(node);
     ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(
-        r.replacement(),
-        IsUint32LessThan(p0, IsInt32Constant(bit_cast<int32_t>(
-                                 (limit << shift) | ((1u << shift) - 1)))));
+    EXPECT_THAT(r.replacement(),
+                IsUint32LessThan(
+                    p0, IsInt32Constant(bit_cast<int32_t>(limit << shift))));
   }
 }
 
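The tightened expectation is the exact strength reduction, assuming the node under test is Uint32LessThan(Word32Shr(p0, shift), limit) as the variable names suggest: for unsigned x, x >> k computes floor(x / 2^k), and for an integer bound L, floor(x / 2^k) < L holds iff x < L * 2^k, i.e. iff x < (L << k) (barring overflow of the shift). The previous constant, (limit << shift) | ((1u << shift) - 1), exceeds that bound by (1u << shift) - 1 and therefore accepted values of x whose shifted form equals limit.
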
diff --git a/test/unittests/compiler/node-test-utils.cc b/test/unittests/compiler/node-test-utils.cc
index a7308f9..4c1e10b 100644
--- a/test/unittests/compiler/node-test-utils.cc
+++ b/test/unittests/compiler/node-test-utils.cc
@@ -949,6 +949,10 @@
 IS_UNOP_MATCHER(TruncateFloat64ToInt32)
 IS_UNOP_MATCHER(TruncateInt64ToInt32)
 IS_UNOP_MATCHER(Float64Sqrt)
+IS_UNOP_MATCHER(Float64Floor)
+IS_UNOP_MATCHER(Float64Ceil)
+IS_UNOP_MATCHER(Float64RoundTruncate)
+IS_UNOP_MATCHER(Float64RoundTiesAway)
 #undef IS_UNOP_MATCHER
 
 }  // namespace compiler
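
IS_UNOP_MATCHER stamps out one matcher factory per opcode; its definition sits above this hunk. Assuming it follows the file's usual pattern, each added line expands to roughly:

    Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher) {
      return MakeMatcher(new IsUnopMatcher(IrOpcode::kFloat64Floor, input_matcher));
    }

which lines up with the declarations added to node-test-utils.h below.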
diff --git a/test/unittests/compiler/node-test-utils.h b/test/unittests/compiler/node-test-utils.h
index da05a10..7f153bd 100644
--- a/test/unittests/compiler/node-test-utils.h
+++ b/test/unittests/compiler/node-test-utils.h
@@ -149,6 +149,10 @@
 Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Ceil(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64RoundTiesAway(const Matcher<Node*>& input_matcher);
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index dddb70c..e51c86b 100644
--- a/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -10,16 +10,6 @@
 namespace internal {
 namespace compiler {
 
-namespace {
-
-// Immediates (random subset).
-static const int32_t kImmediates[] = {
-    kMinInt, -42, -1, 0,  1,  2,    3,      4,          5,
-    6,       7,   8,  16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
-
-}  // namespace
-
-
 // -----------------------------------------------------------------------------
 // Conversions.
 
@@ -83,39 +73,6 @@
 
 
 // -----------------------------------------------------------------------------
-// Better left operand for commutative binops
-
-TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  Node* param1 = m.Parameter(0);
-  Node* param2 = m.Parameter(1);
-  Node* add = m.Int32Add(param1, param2);
-  m.Return(m.Int32Add(add, param1));
-  Stream s = m.Build();
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kX64Add32, s[0]->arch_opcode());
-  ASSERT_EQ(2U, s[0]->InputCount());
-  ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
-  EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
-}
-
-
-TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
-  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
-  Node* param1 = m.Parameter(0);
-  Node* param2 = m.Parameter(1);
-  Node* mul = m.Int32Mul(param1, param2);
-  m.Return(m.Int32Mul(mul, param1));
-  Stream s = m.Build();
-  ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
-  ASSERT_EQ(2U, s[0]->InputCount());
-  ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
-  EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
-}
-
-
-// -----------------------------------------------------------------------------
 // Loads and stores
 
 namespace {
@@ -181,324 +138,152 @@
                         ::testing::ValuesIn(kMemoryAccesses));
 
 // -----------------------------------------------------------------------------
-// AddressingMode for loads and stores.
+// ChangeUint32ToUint64.
 
-class AddressingModeUnitTest : public InstructionSelectorTest {
- public:
-  AddressingModeUnitTest() : m(NULL) { Reset(); }
-  ~AddressingModeUnitTest() { delete m; }
 
-  void Run(Node* base, Node* index, AddressingMode mode) {
-    Node* load = m->Load(kMachInt32, base, index);
-    m->Store(kMachInt32, base, index, load);
-    m->Return(m->Int32Constant(0));
-    Stream s = m->Build();
-    ASSERT_EQ(2U, s.size());
-    EXPECT_EQ(mode, s[0]->addressing_mode());
-    EXPECT_EQ(mode, s[1]->addressing_mode());
-  }
+namespace {
 
-  Node* zero;
-  Node* null_ptr;
-  Node* non_zero;
-  Node* base_reg;   // opaque value to generate base as register
-  Node* index_reg;  // opaque value to generate index as register
-  Node* scales[arraysize(ScaleFactorMatcher::kMatchedFactors)];
-  StreamBuilder* m;
+typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
 
-  void Reset() {
-    delete m;
-    m = new StreamBuilder(this, kMachInt32, kMachInt32, kMachInt32);
-    zero = m->Int32Constant(0);
-    null_ptr = m->Int64Constant(0);
-    non_zero = m->Int32Constant(127);
-    base_reg = m->Parameter(0);
-    index_reg = m->Parameter(0);
-    for (size_t i = 0; i < arraysize(ScaleFactorMatcher::kMatchedFactors);
-         ++i) {
-      scales[i] = m->Int32Constant(ScaleFactorMatcher::kMatchedFactors[i]);
-    }
-  }
+
+struct BinaryOperation {
+  Constructor constructor;
+  const char* constructor_name;
 };
 
 
-TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
-  Node* base = base_reg;
-  Node* index = zero;
-  Run(base, index, kMode_MR);
+std::ostream& operator<<(std::ostream& os, const BinaryOperation& bop) {
+  return os << bop.constructor_name;
 }
 
 
-TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
-  Node* base = base_reg;
-  Node* index = non_zero;
-  Run(base, index, kMode_MRI);
+const BinaryOperation kWord32BinaryOperations[] = {
+    {&RawMachineAssembler::Word32And, "Word32And"},
+    {&RawMachineAssembler::Word32Or, "Word32Or"},
+    {&RawMachineAssembler::Word32Xor, "Word32Xor"},
+    {&RawMachineAssembler::Word32Shl, "Word32Shl"},
+    {&RawMachineAssembler::Word32Shr, "Word32Shr"},
+    {&RawMachineAssembler::Word32Sar, "Word32Sar"},
+    {&RawMachineAssembler::Word32Ror, "Word32Ror"},
+    {&RawMachineAssembler::Word32Equal, "Word32Equal"},
+    {&RawMachineAssembler::Int32Add, "Int32Add"},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub"},
+    {&RawMachineAssembler::Int32Mul, "Int32Mul"},
+    {&RawMachineAssembler::Int32MulHigh, "Int32MulHigh"},
+    {&RawMachineAssembler::Int32Div, "Int32Div"},
+    {&RawMachineAssembler::Int32LessThan, "Int32LessThan"},
+    {&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual"},
+    {&RawMachineAssembler::Int32Mod, "Int32Mod"},
+    {&RawMachineAssembler::Uint32Div, "Uint32Div"},
+    {&RawMachineAssembler::Uint32LessThan, "Uint32LessThan"},
+    {&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual"},
+    {&RawMachineAssembler::Uint32Mod, "Uint32Mod"}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<BinaryOperation>
+    InstructionSelectorChangeUint32ToUint64Test;
+
+
+TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
+  const BinaryOperation& bop = GetParam();
+  StreamBuilder m(this, kMachUint64, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  m.Return(m.ChangeUint32ToUint64((m.*bop.constructor)(p0, p1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
 }
 
 
-TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
-  Node* base = base_reg;
-  Node* index = index_reg;
-  Run(base, index, kMode_MR1);
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorChangeUint32ToUint64Test,
+                        ::testing::ValuesIn(kWord32BinaryOperations));
+
+
+// -----------------------------------------------------------------------------
+// TruncateInt64ToInt32.
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  Node* const p = m.Parameter(0);
+  Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
+  m.Return(t);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
 }
 
 
-TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
-  AddressingMode expected[] = {kMode_MR1, kMode_MR2, kMode_MR4, kMode_MR8};
-  for (size_t i = 0; i < arraysize(scales); ++i) {
-    Reset();
-    Node* base = base_reg;
-    Node* index = m->Int32Mul(index_reg, scales[i]);
-    Run(base, index, expected[i]);
-  }
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  Node* const p = m.Parameter(0);
+  Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
+  m.Return(t);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Shr, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(32, s.ToInt32(s[0]->InputAt(1)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE(s.IsSameAsFirst(s[0]->OutputAt(0)));
+  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
 }
 
 
-TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
-  Node* base = base_reg;
-  Node* index = m->Int32Add(index_reg, non_zero);
-  Run(base, index, kMode_MR1I);
-}
+// -----------------------------------------------------------------------------
+// Addition.
 
 
-TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
-  AddressingMode expected[] = {kMode_MR1I, kMode_MR2I, kMode_MR4I, kMode_MR8I};
-  for (size_t i = 0; i < arraysize(scales); ++i) {
-    Reset();
-    Node* base = base_reg;
-    Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
-    Run(base, index, expected[i]);
-  }
-}
-
-
-TEST_F(AddressingModeUnitTest, AddressingMode_M1) {
-  Node* base = null_ptr;
-  Node* index = index_reg;
-  Run(base, index, kMode_M1);
-}
-
-
-TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
-  AddressingMode expected[] = {kMode_M1, kMode_M2, kMode_M4, kMode_M8};
-  for (size_t i = 0; i < arraysize(scales); ++i) {
-    Reset();
-    Node* base = null_ptr;
-    Node* index = m->Int32Mul(index_reg, scales[i]);
-    Run(base, index, expected[i]);
-  }
-}
-
-
-TEST_F(AddressingModeUnitTest, AddressingMode_M1I) {
-  Node* base = null_ptr;
-  Node* index = m->Int32Add(index_reg, non_zero);
-  Run(base, index, kMode_M1I);
-}
-
-
-TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
-  AddressingMode expected[] = {kMode_M1I, kMode_M2I, kMode_M4I, kMode_M8I};
-  for (size_t i = 0; i < arraysize(scales); ++i) {
-    Reset();
-    Node* base = null_ptr;
-    Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
-    Run(base, index, expected[i]);
-  }
+TEST_F(InstructionSelectorTest, Int32AddWithInt32AddWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const a0 = m.Int32Add(p0, p1);
+  m.Return(m.Int32Add(a0, p0));
+  Stream s = m.Build();
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kX64Add32, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
 }
 
 
 // -----------------------------------------------------------------------------
 // Multiplication.
 
-namespace {
 
-struct MultParam {
-  int value;
-  bool lea_expected;
-  AddressingMode addressing_mode;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const MultParam& m) {
-  return os << m.value << "." << m.lea_expected << "." << m.addressing_mode;
-}
-
-
-const MultParam kMultParams[] = {{-1, false, kMode_None},
-                                 {0, false, kMode_None},
-                                 {1, true, kMode_M1},
-                                 {2, true, kMode_M2},
-                                 {3, true, kMode_MR2},
-                                 {4, true, kMode_M4},
-                                 {5, true, kMode_MR4},
-                                 {6, false, kMode_None},
-                                 {7, false, kMode_None},
-                                 {8, true, kMode_M8},
-                                 {9, true, kMode_MR8},
-                                 {10, false, kMode_None},
-                                 {11, false, kMode_None}};
-
-}  // namespace
-
-
-typedef InstructionSelectorTestWithParam<MultParam> InstructionSelectorMultTest;
-
-
-static unsigned InputCountForLea(AddressingMode mode) {
-  switch (mode) {
-    case kMode_MR1I:
-    case kMode_MR2I:
-    case kMode_MR4I:
-    case kMode_MR8I:
-      return 3U;
-    case kMode_M1I:
-    case kMode_M2I:
-    case kMode_M4I:
-    case kMode_M8I:
-      return 2U;
-    case kMode_MR1:
-    case kMode_MR2:
-    case kMode_MR4:
-    case kMode_MR8:
-      return 2U;
-    case kMode_M1:
-    case kMode_M2:
-    case kMode_M4:
-    case kMode_M8:
-      return 1U;
-    default:
-      UNREACHABLE();
-      return 0U;
-  }
-}
-
-
-static AddressingMode AddressingModeForAddMult(const MultParam& m) {
-  switch (m.addressing_mode) {
-    case kMode_MR1:
-      return kMode_MR1I;
-    case kMode_MR2:
-      return kMode_MR2I;
-    case kMode_MR4:
-      return kMode_MR4I;
-    case kMode_MR8:
-      return kMode_MR8I;
-    case kMode_M1:
-      return kMode_M1I;
-    case kMode_M2:
-      return kMode_M2I;
-    case kMode_M4:
-      return kMode_M4I;
-    case kMode_M8:
-      return kMode_M8I;
-    default:
-      UNREACHABLE();
-      return kMode_None;
-  }
-}
-
-
-TEST_P(InstructionSelectorMultTest, Mult32) {
-  const MultParam m_param = GetParam();
-  StreamBuilder m(this, kMachInt32, kMachInt32);
-  Node* param = m.Parameter(0);
-  Node* mult = m.Int32Mul(param, m.Int32Constant(m_param.value));
-  m.Return(mult);
+TEST_F(InstructionSelectorTest, Int32MulWithInt32MulWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const m0 = m.Int32Mul(p0, p1);
+  m.Return(m.Int32Mul(m0, p0));
   Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(m_param.addressing_mode, s[0]->addressing_mode());
-  if (m_param.lea_expected) {
-    EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
-    ASSERT_EQ(InputCountForLea(s[0]->addressing_mode()), s[0]->InputCount());
-  } else {
-    EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-  }
-  EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[0]->OutputAt(0)));
+  EXPECT_EQ(kX64Imul32, s[1]->arch_opcode());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(m0), s.ToVreg(s[1]->InputAt(1)));
 }
 
 
-TEST_P(InstructionSelectorMultTest, Mult64) {
-  const MultParam m_param = GetParam();
-  StreamBuilder m(this, kMachInt64, kMachInt64);
-  Node* param = m.Parameter(0);
-  Node* mult = m.Int64Mul(param, m.Int64Constant(m_param.value));
-  m.Return(mult);
-  Stream s = m.Build();
-  ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(m_param.addressing_mode, s[0]->addressing_mode());
-  if (m_param.lea_expected) {
-    EXPECT_EQ(kX64Lea, s[0]->arch_opcode());
-    ASSERT_EQ(InputCountForLea(s[0]->addressing_mode()), s[0]->InputCount());
-    EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(0)));
-  } else {
-    EXPECT_EQ(kX64Imul, s[0]->arch_opcode());
-    ASSERT_EQ(2U, s[0]->InputCount());
-    // TODO(dcarney): why is this happening?
-    EXPECT_EQ(s.ToVreg(param), s.ToVreg(s[0]->InputAt(1)));
-  }
-}
-
-
-TEST_P(InstructionSelectorMultTest, MultAdd32) {
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    const MultParam m_param = GetParam();
-    StreamBuilder m(this, kMachInt32, kMachInt32);
-    Node* param = m.Parameter(0);
-    Node* mult = m.Int32Add(m.Int32Mul(param, m.Int32Constant(m_param.value)),
-                            m.Int32Constant(imm));
-    m.Return(mult);
-    Stream s = m.Build();
-    if (m_param.lea_expected) {
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
-      EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
-      unsigned input_count = InputCountForLea(s[0]->addressing_mode());
-      ASSERT_EQ(input_count, s[0]->InputCount());
-      ASSERT_EQ(InstructionOperand::IMMEDIATE,
-                s[0]->InputAt(input_count - 1)->kind());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
-    } else {
-      ASSERT_EQ(2U, s.size());
-      EXPECT_EQ(kX64Imul32, s[0]->arch_opcode());
-      EXPECT_EQ(kX64Add32, s[1]->arch_opcode());
-    }
-  }
-}
-
-
-TEST_P(InstructionSelectorMultTest, MultAdd64) {
-  TRACED_FOREACH(int32_t, imm, kImmediates) {
-    const MultParam m_param = GetParam();
-    StreamBuilder m(this, kMachInt64, kMachInt64);
-    Node* param = m.Parameter(0);
-    Node* mult = m.Int64Add(m.Int64Mul(param, m.Int64Constant(m_param.value)),
-                            m.Int64Constant(imm));
-    m.Return(mult);
-    Stream s = m.Build();
-    if (m_param.lea_expected) {
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kX64Lea, s[0]->arch_opcode());
-      EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
-      unsigned input_count = InputCountForLea(s[0]->addressing_mode());
-      ASSERT_EQ(input_count, s[0]->InputCount());
-      ASSERT_EQ(InstructionOperand::IMMEDIATE,
-                s[0]->InputAt(input_count - 1)->kind());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
-    } else {
-      ASSERT_EQ(2U, s.size());
-      EXPECT_EQ(kX64Imul, s[0]->arch_opcode());
-      EXPECT_EQ(kX64Add, s[1]->arch_opcode());
-    }
-  }
-}
-
-
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMultTest,
-                        ::testing::ValuesIn(kMultParams));
-
-
 TEST_F(InstructionSelectorTest, Int32MulHigh) {
   StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
   Node* const p0 = m.Parameter(0);
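
The new ChangeUint32ToUint64 test iterates a table of RawMachineAssembler member-function pointers and invokes each entry with (m.*bop.constructor)(p0, p1). For readers unfamiliar with the pointer-to-member syntax, a self-contained illustration of the same dispatch pattern (all names here are hypothetical, not from the V8 sources):

    #include <cassert>

    struct Asm {
      int Add(int a, int b) { return a + b; }
      int Mul(int a, int b) { return a * b; }
    };

    // Same shape as the Constructor typedef above: a pointer to a
    // two-argument member function.
    typedef int (Asm::*BinOp)(int, int);

    int main() {
      const BinOp kOps[] = {&Asm::Add, &Asm::Mul};
      Asm m;
      assert((m.*kOps[0])(2, 3) == 5);  // dispatch through the table
      assert((m.*kOps[1])(2, 3) == 6);
      return 0;
    }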
diff --git a/test/webkit/webkit.status b/test/webkit/webkit.status
index 15c1eef..89a5cab 100644
--- a/test/webkit/webkit.status
+++ b/test/webkit/webkit.status
@@ -44,6 +44,9 @@
   'dfg-double-vote-fuzz': [SKIP],
   'reentrant-caching': [SKIP],
   'sort-large-array': [SKIP],
+  # Too slow on Windows with --nocrankshaft.
+  # TODO(mstarzinger): Too slow with TF.
+  'array-iterate-backwards': [PASS, NO_VARIANTS],
 }],  # 'mode == debug'
 ['simulator', {
   # Skip tests that timeout with turbofan.
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index d66117c..d2d5e5a 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -463,7 +463,6 @@
         '../../src/compiler/node-aux-data.h',
         '../../src/compiler/node-cache.cc',
         '../../src/compiler/node-cache.h',
-        '../../src/compiler/node-matchers.cc',
         '../../src/compiler/node-matchers.h',
         '../../src/compiler/node-properties-inl.h',
         '../../src/compiler/node-properties.h',
diff --git a/tools/push-to-trunk/releases.py b/tools/push-to-trunk/releases.py
index 0cd9e61..53648a6 100755
--- a/tools/push-to-trunk/releases.py
+++ b/tools/push-to-trunk/releases.py
@@ -26,7 +26,8 @@
 }
 
 # Expression for retrieving the bleeding edge revision from a commit message.
-PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
+PUSH_MSG_SVN_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
+PUSH_MSG_GIT_RE = re.compile(r".* \(based on ([a-fA-F0-9]+)\)$")
 
 # Expression for retrieving the merged patches from a merge commit message
 # (old and new format).
@@ -128,7 +129,10 @@
             and len(releases) > self._options.max_releases)
 
   def GetBleedingEdgeFromPush(self, title):
-    return MatchSafe(PUSH_MESSAGE_RE.match(title))
+    return MatchSafe(PUSH_MSG_SVN_RE.match(title))
+
+  def GetBleedingEdgeGitFromPush(self, title):
+    return MatchSafe(PUSH_MSG_GIT_RE.match(title))
 
   def GetMergedPatches(self, body):
     patches = MatchSafe(MERGE_MESSAGE_RE.search(body))
@@ -189,6 +193,8 @@
     if bleeding_edge_revision:
       bleeding_edge_git = self.vc.SvnGit(bleeding_edge_revision,
                                          self.vc.RemoteMasterBranch())
+    else:
+      bleeding_edge_git = self.GetBleedingEdgeGitFromPush(title)
     return self.GetReleaseDict(
         git_hash, bleeding_edge_revision, bleeding_edge_git, branch, version,
         patches, body), self["patch"]
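
The split keeps the old SVN-based push titles working while adding a fallback for pushes that reference a git hash; GetBleedingEdgeGitFromPush is only consulted when the SVN expression matched nothing. Hypothetical titles each expression accepts:

    "Version 3.30.23 (based on bleeding_edge revision r24890)"
        -> PUSH_MSG_SVN_RE, group(1) == "24890"
    "Version 3.30.23 (based on 78b694ed1084696cb7b8c01ec78e79ff24e255e8)"
        -> PUSH_MSG_GIT_RE, group(1) == the hash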